M7350v1_en_gpl

This commit is contained in:
T
2024-09-09 08:52:07 +00:00
commit f9cc65cfda
65988 changed files with 26357421 additions and 0 deletions

View File

@@ -0,0 +1,22 @@
#
# This class is used for architecture independent recipes/data files (usually scripts)
#
PACKAGE_ARCH = "all"
# No need for virtual/libc or a cross compiler
INHIBIT_DEFAULT_DEPS = "1"
# Set these to a common set of values, we shouldn't be using them other than for WORKDIR directory
# naming anyway
TARGET_ARCH = "allarch"
TARGET_OS = "linux"
TARGET_CC_ARCH = "none"
TARGET_LD_ARCH = "none"
TARGET_AS_ARCH = "none"
PACKAGE_EXTRA_ARCHS = ""
# No need to do shared library processing or debug symbol handling
EXCLUDE_FROM_SHLIBS = "1"
INHIBIT_PACKAGE_DEBUG_SPLIT = "1"
INHIBIT_PACKAGE_STRIP = "1"
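As an illustration (a hypothetical sketch, not a file from this tree), a recipe for architecture-independent content would simply inherit this class; the settings above then neutralise the target-specific variables for it:

# hypothetical-scripts_1.0.bb -- illustrative sketch only
SUMMARY = "Example architecture-independent script package"
LICENSE = "MIT"
SRC_URI = "file://hello.sh"
inherit allarch
do_install() {
	install -d ${D}${bindir}
	install -m 0755 ${WORKDIR}/hello.sh ${D}${bindir}/hello.sh
}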

View File

@@ -0,0 +1,186 @@
def autotools_dep_prepend(d):
if d.getVar('INHIBIT_AUTOTOOLS_DEPS', 1):
return ''
pn = d.getVar('PN', 1)
deps = ''
if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
return deps
deps += 'autoconf-native automake-native '
if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
deps += 'libtool-native '
if not bb.data.inherits_class('native', d) \
and not bb.data.inherits_class('cross', d) \
and not d.getVar('INHIBIT_DEFAULT_DEPS', 1):
deps += 'libtool-cross '
return deps + 'gnu-config-native '
EXTRA_OEMAKE = ""
DEPENDS_prepend = "${@autotools_dep_prepend(d)}"
inherit siteinfo
# Space-separated list of shell scripts with variables defined to supply test
# results for autoconf tests we cannot run at build time.
export CONFIG_SITE = "${@siteinfo_get_files(d)}"
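# As an illustration, such a site file is just a shell fragment that pre-seeds
# autoconf cache variables which cannot be probed when cross-compiling, e.g.
# (hypothetical contents):
#   ac_cv_func_malloc_0_nonnull=yes
#   ac_cv_file__dev_zero=yes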
acpaths = "default"
EXTRA_AUTORECONF = "--exclude=autopoint"
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir}"
def autotools_set_crosscompiling(d):
if not bb.data.inherits_class('native', d):
return " cross_compiling=yes"
return ""
def append_libtool_sysroot(d):
# Only supply libtool sysroot option for non-native packages
if not bb.data.inherits_class('native', d):
return '--with-libtool-sysroot=${STAGING_DIR_HOST}'
return ""
# EXTRA_OECONF_append = "${@autotools_set_crosscompiling(d)}"
CONFIGUREOPTS = " --build=${BUILD_SYS} \
--host=${HOST_SYS} \
--target=${TARGET_SYS} \
--prefix=${prefix} \
--exec_prefix=${exec_prefix} \
--bindir=${bindir} \
--sbindir=${sbindir} \
--libexecdir=${libexecdir} \
--datadir=${datadir} \
--sysconfdir=${sysconfdir} \
--sharedstatedir=${sharedstatedir} \
--localstatedir=${localstatedir} \
--libdir=${libdir} \
--includedir=${includedir} \
--oldincludedir=${oldincludedir} \
--infodir=${infodir} \
--mandir=${mandir} \
--disable-silent-rules \
${CONFIGUREOPT_DEPTRACK} \
${@append_libtool_sysroot(d)}"
CONFIGUREOPT_DEPTRACK = "--disable-dependency-tracking"
oe_runconf () {
cfgscript="${S}/configure"
if [ -x "$cfgscript" ] ; then
bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@" || bbfatal "oe_runconf failed"
else
bbfatal "no configure script found at $cfgscript"
fi
}
AUTOTOOLS_AUXDIR ?= "${S}"
autotools_do_configure() {
case ${PN} in
autoconf*)
;;
automake*)
;;
*)
# WARNING: gross hack follows:
# An autotools-built package generally needs these scripts; however, only
# automake or libtoolize actually install the current versions of them.
# This is a problem in builds that do not use libtool or automake, in the case
# where we -need- the latest version of these scripts, e.g. running a build
# for a package whose autotools are old, on an x86_64 machine, which the old
# config.sub does not support. Work around this by installing them manually
# regardless.
( for ac in `find ${S} -name configure.in -o -name configure.ac`; do
rm -f `dirname $ac`/configure
done )
if [ -e ${S}/configure.in -o -e ${S}/configure.ac ]; then
olddir=`pwd`
cd ${S}
if [ x"${acpaths}" = xdefault ]; then
acpaths=
for i in `find ${S} -maxdepth 2 -name \*.m4|grep -v 'aclocal.m4'| \
grep -v 'acinclude.m4' | sed -e 's,\(.*/\).*$,\1,'|sort -u`; do
acpaths="$acpaths -I $i"
done
else
acpaths="${acpaths}"
fi
AUTOV=`automake --version |head -n 1 |sed "s/.* //;s/\.[0-9]\+$//"`
automake --version
echo "AUTOV is $AUTOV"
if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
acpaths="$acpaths -I${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
fi
# The aclocal directory could get modified by other processes
# uninstalling data from the sysroot. See Yocto #861 for details.
# We avoid this by taking a copy here so that the files cannot disappear.
if [ -d ${STAGING_DATADIR}/aclocal ]; then
mkdir -p ${B}/aclocal-copy/
# for scratch build this directory can be empty
# so avoid cp's no files to copy error
cp -r ${STAGING_DATADIR}/aclocal/. ${B}/aclocal-copy/
acpaths="$acpaths -I ${B}/aclocal-copy/"
fi
# autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
# like it was auto-generated. Work around this by blowing it away
# by hand, unless the package specifically asked not to run aclocal.
if ! echo ${EXTRA_AUTORECONF} | grep -q "aclocal"; then
rm -f aclocal.m4
fi
if [ -e configure.in ]; then
CONFIGURE_AC=configure.in
else
CONFIGURE_AC=configure.ac
fi
if ! echo ${EXTRA_OECONF} | grep -q "\-\-disable-nls"; then
if grep "^[[:space:]]*AM_GLIB_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
if grep "sed.*POTFILES" $CONFIGURE_AC >/dev/null; then
: do nothing -- we still have an old unmodified configure.ac
else
bbnote Executing glib-gettextize --force --copy
echo "no" | glib-gettextize --force --copy
fi
else if grep "^[[:space:]]*AM_GNU_GETTEXT" $CONFIGURE_AC >/dev/null; then
# We'd call gettextize here if it wasn't so broken...
cp ${STAGING_DATADIR}/gettext/config.rpath ${AUTOTOOLS_AUXDIR}/
if [ ! -e ${S}/po/Makefile.in.in ]; then
cp ${STAGING_DATADIR}/gettext/po/Makefile.in.in ${S}/po/
fi
fi
fi
fi
mkdir -p m4
if grep "^[[:space:]]*[AI][CT]_PROG_INTLTOOL" $CONFIGURE_AC >/dev/null; then
bbnote Executing intltoolize --copy --force --automake
intltoolize --copy --force --automake
fi
bbnote Executing autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || bbfatal "autoreconf execution failed."
cd $olddir
fi
;;
esac
if [ -e ${S}/configure ]; then
oe_runconf
else
bbnote "nothing to configure"
fi
}
autotools_do_install() {
oe_runmake 'DESTDIR=${D}' install
# Info dir listing isn't interesting at this point so remove it if it exists.
if [ -e "${D}${infodir}/dir" ]; then
rm -f ${D}${infodir}/dir
fi
}
inherit siteconfig
EXPORT_FUNCTIONS do_configure do_install

View File

@@ -0,0 +1,2 @@
inherit autotools

View File

@@ -0,0 +1,510 @@
BB_DEFAULT_TASK ?= "build"
inherit patch
inherit staging
inherit mirrors
inherit utils
inherit utility-tasks
inherit metadata_scm
inherit logging
OE_IMPORTS += "os sys time oe.path oe.utils oe.data oe.packagegroup oe.sstatesig"
OE_IMPORTS[type] = "list"
def oe_import(d):
import os, sys
bbpath = d.getVar("BBPATH", True).split(":")
sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]
def inject(name, value):
"""Make a python object accessible from the metadata"""
if hasattr(bb.utils, "_context"):
bb.utils._context[name] = value
else:
__builtins__[name] = value
import oe.data
for toimport in oe.data.typed_value("OE_IMPORTS", d):
imported = __import__(toimport)
inject(toimport.split(".", 1)[0], imported)
python oe_import_eh () {
if isinstance(e, bb.event.ConfigParsed):
oe_import(e.data)
}
addhandler oe_import_eh
die() {
bbfatal "$*"
}
oe_runmake() {
if [ x"$MAKE" = x ]; then MAKE=make; fi
bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
${MAKE} ${EXTRA_OEMAKE} "$@" || die "oe_runmake failed"
}
def base_dep_prepend(d):
#
# Ideally this will check a flag so we will operate properly in
# the case where host == build == target; for now we don't work in
# that case though.
#
deps = ""
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
if not d.getVar('INHIBIT_DEFAULT_DEPS'):
if (d.getVar('HOST_SYS', 1) !=
d.getVar('BUILD_SYS', 1)):
deps += " virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc "
return deps
BASEDEPENDS = "${@base_dep_prepend(d)}"
DEPENDS_prepend="${BASEDEPENDS} "
FILESPATH = "${@base_set_filespath([ "${FILE_DIRNAME}/${PF}", "${FILE_DIRNAME}/${P}", "${FILE_DIRNAME}/${PN}", "${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files", "${FILE_DIRNAME}" ], d)}"
# THISDIR only works properly with immediate expansion as it has to run
# in the context of the location it's used in (:=)
THISDIR = "${@os.path.dirname(d.getVar('FILE', True))}"
addtask fetch
do_fetch[dirs] = "${DL_DIR}"
python base_do_fetch() {
src_uri = (d.getVar('SRC_URI', True) or "").split()
if len(src_uri) == 0:
return
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
try:
fetcher = bb.fetch2.Fetch(src_uri, localdata)
fetcher.download()
except bb.fetch2.BBFetchException, e:
raise bb.build.FuncFailed(e)
}
addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"
python base_do_unpack() {
src_uri = (d.getVar('SRC_URI', True) or "").split()
if len(src_uri) == 0:
return
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
rootdir = localdata.getVar('WORKDIR', True)
try:
fetcher = bb.fetch2.Fetch(src_uri, localdata)
fetcher.unpack(rootdir)
except bb.fetch2.BBFetchException, e:
raise bb.build.FuncFailed(e)
}
GIT_CONFIG = "${STAGING_DIR_NATIVE}/usr/etc/gitconfig"
def generate_git_config(e):
from bb import data
if data.getVar('GIT_CORE_CONFIG', e.data, True):
gitconfig_path = e.data.getVar('GIT_CONFIG', True)
proxy_command = " gitproxy = %s\n" % data.getVar('GIT_PROXY_COMMAND', e.data, True)
bb.mkdirhier(bb.data.expand("${STAGING_DIR_NATIVE}/usr/etc/", e.data))
if (os.path.exists(gitconfig_path)):
os.remove(gitconfig_path)
f = open(gitconfig_path, 'w')
f.write("[core]\n")
ignore_hosts = data.getVar('GIT_PROXY_IGNORE', e.data, True).split()
for ignore_host in ignore_hosts:
f.write(" gitproxy = none for %s\n" % ignore_host)
f.write(proxy_command)
f.close()
def pkgarch_mapping(d):
# Compatibility mappings of TUNE_PKGARCH (opt in)
if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
d.setVar("TUNE_PKGARCH", "armv7a")
def preferred_ml_updates(d):
# If any PREFERRED_PROVIDER or PREFERRED_VERSION variables are set,
# we need to mirror these variables in the multilib case
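# For example (hypothetical values): with MULTILIBS = "multilib:lib32",
# PREFERRED_PROVIDER_virtual/kernel = "linux-yocto" is mirrored as
# PREFERRED_PROVIDER_virtual/lib32-kernel = "linux-yocto"; for non-kernel
# providers the mirrored value also gains the "lib32-" prefix.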
multilibs = d.getVar('MULTILIBS', True) or ""
if not multilibs:
return
prefixes = []
for ext in multilibs.split():
eext = ext.split(':')
if len(eext) > 1 and eext[0] == 'multilib':
prefixes.append(eext[1])
versions = []
providers = []
for v in d.keys():
if v.startswith("PREFERRED_VERSION_"):
versions.append(v)
if v.startswith("PREFERRED_PROVIDER_"):
providers.append(v)
for v in versions:
val = d.getVar(v, False)
pkg = v.replace("PREFERRED_VERSION_", "")
if pkg.endswith("-native") or pkg.endswith("-nativesdk"):
continue
for p in prefixes:
newname = "PREFERRED_VERSION_" + p + "-" + pkg
if not d.getVar(newname, False):
d.setVar(newname, val)
for prov in providers:
val = d.getVar(prov, False)
pkg = prov.replace("PREFERRED_PROVIDER_", "")
if pkg.endswith("-native") or pkg.endswith("-nativesdk"):
continue
virt = ""
if pkg.startswith("virtual/"):
pkg = pkg.replace("virtual/", "")
virt = "virtual/"
for p in prefixes:
newname = "PREFERRED_PROVIDER_" + virt + p + "-" + pkg
if pkg != "kernel":
val = p + "-" + val
if not d.getVar(newname, False):
d.setVar(newname, val)
mp = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
extramp = []
for p in mp:
if p.endswith("-native") or p.endswith("-nativesdk"):
continue
virt = ""
if p.startswith("virtual/"):
p = p.replace("virtual/", "")
virt = "virtual/"
for pref in prefixes:
extramp.append(virt + pref + "-" + p)
d.setVar("MULTI_PROVIDER_WHITELIST", " ".join(mp + extramp))
def get_layers_branch_rev(d):
layers = (d.getVar("BBLAYERS", 1) or "").split()
layers_branch_rev = ["%-17s = \"%s:%s\"" % (os.path.basename(i), \
base_get_metadata_git_branch(i, None).strip(), \
base_get_metadata_git_revision(i, None)) \
for i in layers]
i = len(layers_branch_rev)-1
p1 = layers_branch_rev[i].find("=")
s1 = layers_branch_rev[i][p1:]
while i > 0:
p2 = layers_branch_rev[i-1].find("=")
s2= layers_branch_rev[i-1][p2:]
if s1 == s2:
layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
i -= 1
else:
i -= 1
p1 = layers_branch_rev[i].find("=")
s1= layers_branch_rev[i][p1:]
return layers_branch_rev
addhandler base_eventhandler
python base_eventhandler() {
from bb.event import getName
name = getName(e)
if name.startswith("BuildStarted"):
e.data.setVar( 'BB_VERSION', bb.__version__)
statusvars = ['BB_VERSION', 'TARGET_ARCH', 'TARGET_OS', 'MACHINE', 'DISTRO', 'DISTRO_VERSION','TUNE_FEATURES', 'TARGET_FPU']
statuslines = ["%-17s = \"%s\"" % (i, e.data.getVar(i, 1) or '') for i in statusvars]
statuslines += get_layers_branch_rev(e.data)
statusmsg = "\nOE Build Configuration:\n%s\n" % '\n'.join(statuslines)
bb.plain(statusmsg)
needed_vars = [ "TARGET_ARCH", "TARGET_OS" ]
pesteruser = []
for v in needed_vars:
val = e.data.getVar(v, 1)
if not val or val == 'INVALID':
pesteruser.append(v)
if pesteruser:
bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
if name == "ConfigParsed":
generate_git_config(e)
pkgarch_mapping(e.data)
preferred_ml_updates(e.data)
}
addtask configure after do_patch
do_configure[dirs] = "${CCACHE_DIR} ${S} ${B}"
do_configure[deptask] = "do_populate_sysroot"
base_do_configure() {
:
}
addtask compile after do_configure
do_compile[dirs] = "${S} ${B}"
base_do_compile() {
if [ -e Makefile -o -e makefile ]; then
oe_runmake || die "make failed"
else
bbnote "nothing to compile"
fi
}
addtask install after do_compile
do_install[dirs] = "${D} ${S} ${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"
base_do_install() {
:
}
base_do_package() {
:
}
addtask build after do_populate_sysroot
do_build = ""
do_build[func] = "1"
do_build[noexec] = "1"
do_build[recrdeptask] += "do_deploy"
do_build () {
:
}
python () {
import exceptions, string, re
# Handle PACKAGECONFIG
#
# These take the form:
#
# PACKAGECONFIG ?? = "<default options>"
# PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends"
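# For example, a hypothetical recipe might set:
#   PACKAGECONFIG ??= "gtk"
#   PACKAGECONFIG[gtk] = "--enable-gtk,--disable-gtk,gtk+,gtk-theme"
# With "gtk" enabled this adds --enable-gtk to EXTRA_OECONF, gtk+ to DEPENDS
# and gtk-theme to RDEPENDS_${PN}; otherwise --disable-gtk is appended instead.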
pkgconfig = (d.getVar('PACKAGECONFIG', True) or "").split()
if pkgconfig:
def appendVar(varname, appends):
if not appends:
return
varname = bb.data.expand(varname, d)
content = d.getVar(varname, False) or ""
content = content + " " + " ".join(appends)
d.setVar(varname, content)
extradeps = []
extrardeps = []
extraconf = []
for flag, flagval in (d.getVarFlags("PACKAGECONFIG") or {}).items():
if flag == "defaultval":
continue
items = flagval.split(",")
if len(items) == 3:
enable, disable, depend = items
rdepend = ""
elif len(items) == 4:
enable, disable, depend, rdepend = items
if flag in pkgconfig:
extradeps.append(depend)
extrardeps.append(rdepend)
extraconf.append(enable)
else:
extraconf.append(disable)
appendVar('DEPENDS', extradeps)
appendVar('RDEPENDS_${PN}', extrardeps)
appendVar('EXTRA_OECONF', extraconf)
# If PRINC is set, try to increase the PR value by the amount specified
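# Worked example (hypothetical values): with PR = "r5" and PRINC = "3",
# pr_prefix matches "r" and prval matches "5", so nval = 5 + 3 = 8 and
# the resulting PR is "r8".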
princ = d.getVar('PRINC', True)
if princ and princ != "0":
pr = d.getVar('PR', True)
pr_prefix = re.search("\D+",pr)
prval = re.search("\d+",pr)
if pr_prefix is None or prval is None:
bb.error("Unable to analyse format of PR variable: %s" % pr)
nval = int(prval.group(0)) + int(princ)
pr = pr_prefix.group(0) + str(nval) + pr[prval.end():]
d.setVar('PR', pr)
pn = d.getVar('PN', 1)
license = d.getVar('LICENSE', True)
if license == "INVALID":
bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)
unmatched_license_flag = check_license_flags(d)
if unmatched_license_flag:
bb.debug(1, "Skipping %s because it has a restricted license not"
" whitelisted in LICENSE_FLAGS_WHITELIST" % pn)
raise bb.parse.SkipPackage("because it has a restricted license not"
" whitelisted in LICENSE_FLAGS_WHITELIST")
# If we're building a target package we need to use fakeroot (pseudo)
# in order to capture permissions, owners, groups and special files
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
d.setVarFlag('do_configure', 'umask', 022)
d.setVarFlag('do_compile', 'umask', 022)
deps = (d.getVarFlag('do_install', 'depends') or "").split()
deps.append('virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_install', 'depends', " ".join(deps))
d.setVarFlag('do_install', 'fakeroot', 1)
d.setVarFlag('do_install', 'umask', 022)
deps = (d.getVarFlag('do_package', 'depends') or "").split()
deps.append('virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package', 'depends', " ".join(deps))
d.setVarFlag('do_package', 'fakeroot', 1)
d.setVarFlag('do_package', 'umask', 022)
d.setVarFlag('do_package_setscene', 'fakeroot', 1)
source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
if not source_mirror_fetch:
need_host = d.getVar('COMPATIBLE_HOST', 1)
if need_host:
import re
this_host = d.getVar('HOST_SYS', 1)
if not re.match(need_host, this_host):
raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
need_machine = d.getVar('COMPATIBLE_MACHINE', 1)
if need_machine:
import re
this_machine = d.getVar('MACHINE', 1)
if this_machine and not re.match(need_machine, this_machine):
this_soc_family = d.getVar('SOC_FAMILY', 1)
if (this_soc_family and not re.match(need_machine, this_soc_family)) or not this_soc_family:
raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % this_machine)
dont_want_license = d.getVar('INCOMPATIBLE_LICENSE', 1)
if dont_want_license and not pn.endswith("-native") and not pn.endswith("-cross") and not pn.endswith("-cross-initial") and not pn.endswith("-cross-intermediate") and not pn.endswith("-crosssdk-intermediate") and not pn.endswith("-crosssdk") and not pn.endswith("-crosssdk-initial"):
hosttools_whitelist = (d.getVar('HOSTTOOLS_WHITELIST_%s' % dont_want_license, 1) or "").split()
lgplv2_whitelist = (d.getVar('LGPLv2_WHITELIST_%s' % dont_want_license, 1) or "").split()
dont_want_whitelist = (d.getVar('WHITELIST_%s' % dont_want_license, 1) or "").split()
if pn not in hosttools_whitelist and pn not in lgplv2_whitelist and pn not in dont_want_whitelist:
this_license = d.getVar('LICENSE', 1)
if incompatible_license(d,dont_want_license):
bb.note("SKIPPING %s because it's %s" % (pn, this_license))
raise bb.parse.SkipPackage("incompatible with license %s" % this_license)
srcuri = d.getVar('SRC_URI', 1)
# Svn packages should DEPEND on subversion-native
if "svn://" in srcuri:
depends = d.getVarFlag('do_fetch', 'depends') or ""
depends = depends + " subversion-native:do_populate_sysroot"
d.setVarFlag('do_fetch', 'depends', depends)
# Git packages should DEPEND on git-native
if "git://" in srcuri:
depends = d.getVarFlag('do_fetch', 'depends') or ""
depends = depends + " git-native:do_populate_sysroot"
d.setVarFlag('do_fetch', 'depends', depends)
# Mercurial packages should DEPEND on mercurial-native
elif "hg://" in srcuri:
depends = d.getVarFlag('do_fetch', 'depends') or ""
depends = depends + " mercurial-native:do_populate_sysroot"
d.setVarFlag('do_fetch', 'depends', depends)
# OSC packages should DEPEND on osc-native
elif "osc://" in srcuri:
depends = d.getVarFlag('do_fetch', 'depends') or ""
depends = depends + " osc-native:do_populate_sysroot"
d.setVarFlag('do_fetch', 'depends', depends)
# *.xz should depend on xz-native for unpacking
# Not endswith because of "*.patch.xz;patch=1". Need bb.decodeurl in future
if '.xz' in srcuri:
depends = d.getVarFlag('do_unpack', 'depends') or ""
depends = depends + " xz-native:do_populate_sysroot"
d.setVarFlag('do_unpack', 'depends', depends)
# unzip-native should already be staged before unpacking ZIP recipes
if ".zip" in srcuri:
depends = d.getVarFlag('do_unpack', 'depends') or ""
depends = depends + " unzip-native:do_populate_sysroot"
d.setVarFlag('do_unpack', 'depends', depends)
# 'multimachine' handling
mach_arch = d.getVar('MACHINE_ARCH', 1)
pkg_arch = d.getVar('PACKAGE_ARCH', 1)
if (pkg_arch == mach_arch):
# Already machine specific - nothing further to do
return
#
# We always try to scan SRC_URI for urls with machine overrides
# unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
#
override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH', 1)
if override != '0':
paths = []
for p in [ "${PF}", "${P}", "${PN}", "files", "" ]:
path = bb.data.expand(os.path.join("${FILE_DIRNAME}", p, "${MACHINE}"), d)
if os.path.isdir(path):
paths.append(path)
if len(paths) != 0:
for s in srcuri.split():
if not s.startswith("file://"):
continue
fetcher = bb.fetch2.Fetch([s], d)
local = fetcher.localpath(s)
for mp in paths:
if local.startswith(mp):
#bb.note("overriding PACKAGE_ARCH from %s to %s" % (pkg_arch, mach_arch))
d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
return
packages = d.getVar('PACKAGES', 1).split()
for pkg in packages:
pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg, 1)
# We could look for != PACKAGE_ARCH here but how to choose
# if multiple differences are present?
# Look through PACKAGE_ARCHS for the priority order?
if pkgarch and pkgarch == mach_arch:
d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
}
addtask cleansstate after do_clean
python do_cleansstate() {
sstate_clean_cachefiles(d)
}
addtask cleanall after do_cleansstate
python do_cleanall() {
src_uri = (d.getVar('SRC_URI', True) or "").split()
if len(src_uri) == 0:
return
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
try:
fetcher = bb.fetch2.Fetch(src_uri, localdata)
fetcher.clean()
except bb.fetch2.BBFetchException, e:
raise bb.build.FuncFailed(e)
}
do_cleanall[nostamp] = "1"
EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package

View File

@@ -0,0 +1,54 @@
# The namespaces can clash here, hence the two-step replace
def get_binconfig_mangle(d):
s = "-e ''"
if not bb.data.inherits_class('native', d):
optional_quote = r"\(\"\?\)"
s += " -e 's:=%s${libdir}:=\\1OELIBDIR:;'" % optional_quote
s += " -e 's:=%s${includedir}:=\\1OEINCDIR:;'" % optional_quote
s += " -e 's:=%s${datadir}:=\\1OEDATADIR:'" % optional_quote
s += " -e 's:=%s${prefix}/:=\\1OEPREFIX/:'" % optional_quote
s += " -e 's:=%s${exec_prefix}/:=\\1OEEXECPREFIX/:'" % optional_quote
s += " -e 's:-L${libdir}:-LOELIBDIR:;'"
s += " -e 's:-I${includedir}:-IOEINCDIR:;'"
s += " -e 's:OELIBDIR:${STAGING_LIBDIR}:;'"
s += " -e 's:OEINCDIR:${STAGING_INCDIR}:;'"
s += " -e 's:OEDATADIR:${STAGING_DATADIR}:'"
s += " -e 's:OEPREFIX:${STAGING_DIR_HOST}${prefix}:'"
s += " -e 's:OEEXECPREFIX:${STAGING_DIR_HOST}${exec_prefix}:'"
s += " -e 's:-I${WORKDIR}:-I${STAGING_INCDIR}:'"
s += " -e 's:-L${WORKDIR}:-L${STAGING_LIBDIR}:'"
return s
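# Illustration (hypothetical script contents): a line such as
#   libs="-L/usr/lib -lfoo"
# in a staged foo-config is first rewritten to -LOELIBDIR and only then to
# -L${STAGING_LIBDIR}; going straight to the staging path could let a later
# substitution match inside the text just inserted by an earlier one.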
BINCONFIG_GLOB ?= "*-config"
PACKAGE_PREPROCESS_FUNCS += "binconfig_package_preprocess"
binconfig_package_preprocess () {
for config in `find ${PKGD} -name '${BINCONFIG_GLOB}'`; do
sed -i \
-e 's:${STAGING_LIBDIR}:${libdir}:g;' \
-e 's:${STAGING_INCDIR}:${includedir}:g;' \
-e 's:${STAGING_DATADIR}:${datadir}:' \
-e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
$config
done
for lafile in `find ${PKGD} -name "*.la"` ; do
sed -i \
-e 's:${STAGING_LIBDIR}:${libdir}:g;' \
-e 's:${STAGING_INCDIR}:${includedir}:g;' \
-e 's:${STAGING_DATADIR}:${datadir}:' \
-e 's:${STAGING_DIR_HOST}${prefix}:${prefix}:' \
$lafile
done
}
SYSROOT_PREPROCESS_FUNCS += "binconfig_sysroot_preprocess"
binconfig_sysroot_preprocess () {
for config in `find ${S} -name '${BINCONFIG_GLOB}'`; do
configname=`basename $config`
install -d ${SYSROOT_DESTDIR}${bindir_crossscripts}
cat $config | sed ${@get_binconfig_mangle(d)} > ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
chmod u+x ${SYSROOT_DESTDIR}${bindir_crossscripts}/$configname
done
}

View File

@@ -0,0 +1,94 @@
# boot-directdisk.bbclass
# (loosely based on bootimg.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
#
# Create an image which can be placed directly onto a harddisk using dd and then
# booted.
#
# This uses syslinux. extlinux would have been nice but requires the ext2/3
# partition to be mounted. grub requires running itself as part of the
# install process.
#
# The end result is a 512-byte boot sector populated with an MBR and partition table
# followed by an msdos fat16 partition containing syslinux and a linux kernel
# completed by the ext2/3 rootfs.
#
# We have to push the msdos partition size > 16MB so fat16 is used, as parted
# won't touch fat12 partitions.
# External variables needed
# ${ROOTFS} - the rootfs image to incorporate
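# Worked example of the offset arithmetic below (hypothetical sizes): with a
# 16384-block boot partition, END1 = 16384 * 1024 = 16777216 bytes; the
# ext2/3 rootfs partition spans END2 = END1 + 512 to END3 = END1 + rootfs
# bytes, and OFFSET = END2 / 512 is the sector at which the rootfs is dd'd.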
do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
syslinux:do_populate_sysroot \
syslinux-native:do_populate_sysroot \
parted-native:do_populate_sysroot \
mtools-native:do_populate_sysroot "
PACKAGES = " "
EXCLUDE_FROM_WORLD = "1"
HDDDIR = "${S}/hdd/boot"
HDDIMG = "${S}/hdd.image"
BOOTDD_VOLUME_ID ?= "boot"
BOOTDD_EXTRA_SPACE ?= "16384"
# Get the build_syslinux_cfg() function from the syslinux class
AUTO_SYSLINUXCFG = "1"
LABELS = "boot"
APPEND = "root=/dev/sda2"
TIMEOUT = "10"
SYSLINUXCFG = "${HDDDIR}/syslinux.cfg"
SYSLINUXMENU = "${HDDDIR}/menu"
inherit syslinux
build_boot_dd() {
IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
install -d ${HDDDIR}
install -m 0644 ${STAGING_DIR_HOST}/kernel/bzImage ${HDDDIR}/vmlinuz
install -m 444 ${STAGING_LIBDIR}/syslinux/ldlinux.sys ${HDDDIR}/ldlinux.sys
BLOCKS=`du -bks ${HDDDIR} | cut -f 1`
SIZE=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
mkdosfs -n ${BOOTDD_VOLUME_ID} -d ${HDDDIR} -C ${HDDIMG} $SIZE
syslinux ${HDDIMG}
chmod 644 ${HDDIMG}
ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
TOTALSIZE=`expr $SIZE + $ROOTFSBLOCKS`
END1=`expr $SIZE \* 1024`
END2=`expr $END1 + 512`
END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
rm -rf $IMAGE
dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
parted $IMAGE mklabel msdos
parted $IMAGE mkpart primary fat16 0 ${END1}B
parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
parted $IMAGE set 1 boot on
parted $IMAGE print
OFFSET=`expr $END2 / 512`
dd if=${STAGING_LIBDIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
dd if=${HDDIMG} of=$IMAGE conv=notrunc seek=1 bs=512
dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
cd ${DEPLOY_DIR_IMAGE}
rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
}
python do_bootdirectdisk() {
bb.build.exec_func('build_syslinux_cfg', d)
bb.build.exec_func('build_boot_dd', d)
}
addtask bootdirectdisk before do_build
do_bootdirectdisk[nostamp] = "1"

View File

@@ -0,0 +1,171 @@
# Copyright (C) 2004, Advanced Micro Devices, Inc. All Rights Reserved
# Released under the MIT license (see packages/COPYING)
# Creates a bootable image using syslinux, your kernel and an optional
# initrd
#
# End result is two things:
#
# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
# an initrd and a rootfs image. These can be written to harddisks directly and
# also booted on USB flash disks (write them there with dd).
#
# 2. A CD .iso image
# The boot process is: the initrd boots and processes the label that was
# selected in syslinux. Actions based on the label are then performed (e.g.
# installing to a hard disk)
# External variables (also used by syslinux.bbclass)
# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
# ${NOISO} - skip building the ISO image if set to 1
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
mtools-native:do_populate_sysroot \
cdrtools-native:do_populate_sysroot"
PACKAGES = " "
EXCLUDE_FROM_WORLD = "1"
HDDDIR = "${S}/hdd/boot"
ISODIR = "${S}/cd"
BOOTIMG_VOLUME_ID ?= "boot"
BOOTIMG_EXTRA_SPACE ?= "512"
EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "grub-efi", "dummy", d)}"
# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
# contain "efi". This way legacy is supported by default if neither is
# specified, maintaining the original behavior.
def pcbios(d):
pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
if pcbios == "0":
pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
return pcbios
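# Resulting PCBIOS value for the four MACHINE_FEATURES combinations:
#   pcbios and efi -> "1"; pcbios only -> "1"; neither -> "1" (the legacy
#   default); efi only -> "0".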
def pcbios_class(d):
if d.getVar("PCBIOS", True) == "1":
return "syslinux"
return "dummy"
PCBIOS = "${@pcbios(d)}"
PCBIOS_CLASS = "${@pcbios_class(d)}"
inherit ${PCBIOS_CLASS}
inherit ${EFI_CLASS}
build_iso() {
# Only create an ISO if we have an INITRD and NOISO was not set
if [ -z "${INITRD}" ] || [ ! -s "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
bbnote "ISO image will not be created."
return
fi
install -d ${ISODIR}
if [ "${PCBIOS}" = "1" ]; then
syslinux_iso_populate
fi
if [ "${EFI}" = "1" ]; then
grubefi_iso_populate
fi
if [ "${PCBIOS}" = "1" ]; then
mkisofs -V ${BOOTIMG_VOLUME_ID} \
-o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} -r \
${MKISOFS_OPTIONS} ${ISODIR}
else
bbnote "EFI-only ISO images are untested, please provide feedback."
mkisofs -V ${BOOTIMG_VOLUME_ID} \
-o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
-r ${ISODIR}
fi
cd ${DEPLOY_DIR_IMAGE}
rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
}
build_hddimg() {
# Create an HDD image
if [ "${NOHDD}" != "1" ] ; then
install -d ${HDDDIR}
if [ "${PCBIOS}" = "1" ]; then
syslinux_hddimg_populate
fi
if [ "${EFI}" = "1" ]; then
grubefi_hddimg_populate
fi
# Calculate the size required for the final image including the
# data and filesystem overhead.
# Sectors: 512 bytes
# Blocks: 1024 bytes
# Determine the sector count just for the data
SECTORS=$(expr $(du --apparent-size -ks ${HDDDIR} | cut -f 1) \* 2)
# Account for the filesystem overhead. This includes directory
# entries in the clusters as well as the FAT itself.
# Assumptions:
# < 16 entries per directory
# 8.3 filenames only
# 32 bytes per dir entry
DIR_BYTES=$(expr $(find ${HDDDIR} | tail -n +2 | wc -l) \* 32)
# 32 bytes for every end-of-directory dir entry
DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${HDDDIR} -type d | tail -n +2 | wc -l) \* 32))
# 4 bytes per FAT entry per sector of data
FAT_BYTES=$(expr $SECTORS \* 4)
# 4 bytes per FAT entry per end-of-cluster list
FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${HDDDIR} -type d | tail -n +2 | wc -l) \* 4))
# Use a ceiling function to determine FS overhead in sectors
DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
# There are two FATs on the image
FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
# Determine the final size in blocks accounting for some padding
BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
# Ensure total sectors is an integral number of sectors per
# track or mcopy will complain. Sectors are 512 bytes, and we
# generate images with 32 sectors per track. This calculation is
# done in blocks, thus the mod by 16 instead of 32.
BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
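# Worked example (hypothetical payload): 8000 KiB of data gives
# SECTORS = 16000; with 100 files and 10 directories, DIR_BYTES is
# 110 * 32 + 10 * 32 = 3840 and FAT_BYTES is 16000 * 4 + 10 * 4 = 64040,
# so DIR_SECTORS = 8 and FAT_SECTORS = 252, giving SECTORS = 16260,
# BLOCKS = 8130 + 512 = 8642, rounded up to 8656 blocks.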
IMG=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
mkdosfs -F 32 -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${IMG} ${BLOCKS}
# Copy HDDDIR recursively into the image file directly
mcopy -i ${IMG} -s ${HDDDIR}/* ::/
if [ "${PCBIOS}" = "1" ]; then
syslinux_hddimg_install
fi
chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
cd ${DEPLOY_DIR_IMAGE}
rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
fi
}
python do_bootimg() {
if d.getVar("PCBIOS", True) == "1":
bb.build.exec_func('build_syslinux_cfg', d)
if d.getVar("EFI", True) == "1":
bb.build.exec_func('build_grub_cfg', d)
bb.build.exec_func('build_hddimg', d)
bb.build.exec_func('build_iso', d)
}
addtask bootimg before do_build
do_bootimg[nostamp] = "1"

View File

@@ -0,0 +1,186 @@
#
# Small event handler to automatically open URLs and file
# bug reports at a Bugzilla of your choice.
# It uses the XML-RPC interface, so you must have that enabled.
#
# Before using, you must define the BUGZILLA_USER and BUGZILLA_PASS credentials,
# BUGZILLA_XMLRPC - uri of xmlrpc.cgi,
# BUGZILLA_PRODUCT, BUGZILLA_COMPONENT - a place in BTS for build bugs
# BUGZILLA_VERSION - version against which to report new bugs
#
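# A hypothetical local.conf fragment enabling this handler might look like:
#   INHERIT += "bugzilla"
#   BUGZILLA_USER = "builder@example.com"
#   BUGZILLA_PASS = "secret"
#   BUGZILLA_XMLRPC = "https://bugzilla.example.com/xmlrpc.cgi"
#   BUGZILLA_PRODUCT = "Builds"
#   BUGZILLA_COMPONENT = "Autobuilder"
#   BUGZILLA_VERSION = "1.0"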
def bugzilla_find_bug_report(debug_file, server, args, bugname):
args['summary'] = bugname
bugs = server.Bug.search(args)
if len(bugs['bugs']) == 0:
print >> debug_file, "Bugs not found"
return (False,None)
else: # silently pick the first result
print >> debug_file, "Result of bug search is "
print >> debug_file, bugs
status = bugs['bugs'][0]['status']
id = bugs['bugs'][0]['id']
return (not status in ["CLOSED", "RESOLVED", "VERIFIED"],id)
def bugzilla_file_bug(debug_file, server, args, name, text, version):
args['summary'] = name
args['comment'] = text
args['version'] = version
args['op_sys'] = 'Linux'
args['platform'] = 'Other'
args['severity'] = 'normal'
args['priority'] = 'Normal'
try:
return server.Bug.create(args)['id']
except Exception, e:
print >> debug_file, repr(e)
return None
def bugzilla_reopen_bug(debug_file, server, args, bug_number):
args['ids'] = [bug_number]
args['status'] = "CONFIRMED"
try:
server.Bug.update(args)
return True
except Exception, e:
print >> debug_file, repr(e)
return False
def bugzilla_create_attachment(debug_file, server, args, bug_number, text, file_name, log, logdescription):
args['ids'] = [bug_number]
args['file_name'] = file_name
args['summary'] = logdescription
args['content_type'] = "text/plain"
args['data'] = log
args['comment'] = text
try:
server.Bug.add_attachment(args)
return True
except Exception, e:
print >> debug_file, repr(e)
return False
def bugzilla_add_comment(debug_file, server, args, bug_number, text):
args['id'] = bug_number
args['comment'] = text
try:
server.Bug.add_comment(args)
return True
except Exception, e:
print >> debug_file, repr(e)
return False
addhandler bugzilla_eventhandler
python bugzilla_eventhandler() {
import bb, os, glob
import xmlrpclib, httplib, base64
class ProxiedTransport(xmlrpclib.Transport):
def __init__(self, proxy, use_datetime = 0):
xmlrpclib.Transport.__init__(self, use_datetime)
self.proxy = proxy
self.user = None
self.password = None
def set_user(self, user):
self.user = user
def set_password(self, password):
self.password = password
def make_connection(self, host):
self.realhost = host
return httplib.HTTP(self.proxy)
def send_request(self, connection, handler, request_body):
connection.putrequest("POST", 'http://%s%s' % (self.realhost, handler))
if self.user != None:
if self.password != None:
auth = "%s:%s" % (self.user, self.password)
else:
auth = self.user
connection.putheader("Proxy-authorization", "Basic " + base64.encodestring(auth))
event = e
data = e.data
name = bb.event.getName(event)
if name == "MsgNote":
# avoid recursion
return
if name == "TaskFailed":
xmlrpc = data.getVar("BUGZILLA_XMLRPC", True)
user = data.getVar("BUGZILLA_USER", True)
passw = data.getVar("BUGZILLA_PASS", True)
product = data.getVar("BUGZILLA_PRODUCT", True)
compon = data.getVar("BUGZILLA_COMPONENT", True)
version = data.getVar("BUGZILLA_VERSION", True)
proxy = data.getVar('http_proxy', True )
if (proxy):
import urllib2
s, u, p, hostport = urllib2._parse_proxy(proxy)
transport = ProxiedTransport(hostport)
else:
transport = None
server = xmlrpclib.ServerProxy(xmlrpc, transport=transport, verbose=0)
args = {
'Bugzilla_login': user,
'Bugzilla_password': passw,
'product': product,
'component': compon}
# evil hack to figure out what is going on
debug_file = open(os.path.join(data.getVar("TMPDIR", True),"..","bugzilla-log"),"a")
file = None
bugname = "%(package)s-%(pv)s-autobuild" % { "package" : data.getVar("PN", True),
"pv" : data.getVar("PV", True),
}
log_file = glob.glob("%s/log.%s.*" % (event.data.getVar('T', True), event.task))
text = "The %s step in %s failed at %s for machine %s" % (e.task, data.getVar("PN", True), data.getVar('DATETIME', True), data.getVar( 'MACHINE', True ) )
if len(log_file) != 0:
print >> debug_file, "Adding log file %s" % log_file[0]
file = open(log_file[0], 'r')
log = file.read()
file.close();
else:
print >> debug_file, "No log file found for the glob"
log = None
(bug_open, bug_number) = bugzilla_find_bug_report(debug_file, server, args.copy(), bugname)
print >> debug_file, "Bug is open: %s and bug number: %s" % (bug_open, bug_number)
# If no bug was found, file a new one; if one exists but is closed, reopen it
if not bug_number:
bug_number = bugzilla_file_bug(debug_file, server, args.copy(), bugname, text, version)
if not bug_number:
print >> debug_file, "Couldn't acquire a new bug_numer, filing a bugreport failed"
else:
print >> debug_file, "The new bug_number: '%s'" % bug_number
elif not bug_open:
if not bugzilla_reopen_bug(debug_file, server, args.copy(), bug_number):
print >> debug_file, "Failed to reopen the bug #%s" % bug_number
else:
print >> debug_file, "Reopened the bug #%s" % bug_number
if bug_number and log:
print >> debug_file, "The bug is known as '%s'" % bug_number
desc = "Build log for machine %s" % (data.getVar('MACHINE', True))
if not bugzilla_create_attachment(debug_file, server, args.copy(), bug_number, text, log_file[0], log, desc):
print >> debug_file, "Failed to attach the build log for bug #%s" % bug_number
else:
print >> debug_file, "Created an attachment for '%s' '%s' '%s'" % (product, compon, bug_number)
else:
print >> debug_file, "Not trying to create an attachment for bug #%s" % bug_number
if not bugzilla_add_comment(debug_file, server, args.copy(), bug_number, text):
print >> debug_file, "Failed to create a comment with the build log for bug #%s" % bug_number
else:
print >> debug_file, "Created a comment for '%s' '%s' '%s'" % (product, compon, bug_number)
# store bug number for oestats-client
if bug_number:
data.setVar('OESTATS_BUG_NUMBER', bug_number)
}

View File

@@ -0,0 +1,383 @@
#
# Records history of build output in order to detect regressions
#
# Based in part on testlab.bbclass and packagehistory.bbclass
#
# Copyright (C) 2011 Intel Corporation
# Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
#
BUILDHISTORY_DIR ?= "${TMPDIR}/buildhistory"
BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
BUILDHISTORY_COMMIT ?= "0"
BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
BUILDHISTORY_PUSH_REPO ?= ""
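# A minimal configuration sketch (hypothetical values) for local.conf:
#   INHERIT += "buildhistory"
#   BUILDHISTORY_COMMIT = "1"
#   BUILDHISTORY_PUSH_REPO = "ssh://git@example.com/buildhistory.git"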
# Must inherit package first before changing PACKAGEFUNCS
inherit package
PACKAGEFUNCS += "buildhistory_emit_pkghistory"
#
# Called during do_package to write out metadata about this package
# for comparison when writing future packages
#
python buildhistory_emit_pkghistory() {
import re
pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
class RecipeInfo:
def __init__(self, name):
self.name = name
self.pe = "0"
self.pv = "0"
self.pr = "r0"
self.depends = ""
self.packages = ""
class PackageInfo:
def __init__(self, name):
self.name = name
self.pe = "0"
self.pv = "0"
self.pr = "r0"
self.size = 0
self.depends = ""
self.rdepends = ""
self.rrecommends = ""
self.files = ""
self.filelist = ""
# Should check PACKAGES here to see if anything was removed
def getpkgvar(pkg, var):
val = bb.data.getVar('%s_%s' % (var, pkg), d, 1)
if val:
return val
val = bb.data.getVar('%s' % (var), d, 1)
return val
def readRecipeInfo(pn, histfile):
rcpinfo = RecipeInfo(pn)
f = open(histfile, "r")
try:
for line in f:
lns = line.split('=')
name = lns[0].strip()
value = lns[1].strip(" \t\r\n").strip('"')
if name == "PE":
rcpinfo.pe = value
elif name == "PV":
rcpinfo.pv = value
elif name == "PR":
rcpinfo.pr = value
elif name == "DEPENDS":
rcpinfo.depends = value
elif name == "PACKAGES":
rcpinfo.packages = value
finally:
f.close()
return rcpinfo
def readPackageInfo(pkg, histfile):
pkginfo = PackageInfo(pkg)
f = open(histfile, "r")
try:
for line in f:
lns = line.split('=')
name = lns[0].strip()
value = lns[1].strip(" \t\r\n").strip('"')
if name == "PE":
pkginfo.pe = value
elif name == "PV":
pkginfo.pv = value
elif name == "PR":
pkginfo.pr = value
elif name == "RDEPENDS":
pkginfo.rdepends = value
elif name == "RRECOMMENDS":
pkginfo.rrecommends = value
elif name == "PKGSIZE":
pkginfo.size = long(value)
elif name == "FILES":
pkginfo.files = value
elif name == "FILELIST":
pkginfo.filelist = value
finally:
f.close()
return pkginfo
def getlastrecipeversion(pn):
try:
histfile = os.path.join(pkghistdir, "latest")
return readRecipeInfo(pn, histfile)
except EnvironmentError:
return None
def getlastpkgversion(pkg):
try:
histfile = os.path.join(pkghistdir, pkg, "latest")
return readPackageInfo(pkg, histfile)
except EnvironmentError:
return None
def squashspaces(string):
return re.sub("\s+", " ", string)
pn = d.getVar('PN', True)
pe = d.getVar('PE', True) or "0"
pv = d.getVar('PV', True)
pr = d.getVar('PR', True)
packages = squashspaces(d.getVar('PACKAGES', True))
rcpinfo = RecipeInfo(pn)
rcpinfo.pe = pe
rcpinfo.pv = pv
rcpinfo.pr = pr
rcpinfo.depends = squashspaces(d.getVar('DEPENDS', True) or "")
rcpinfo.packages = packages
write_recipehistory(rcpinfo, d)
write_latestlink(None, pe, pv, pr, d)
# Apparently the version can be different on a per-package basis (see Python)
pkgdest = d.getVar('PKGDEST', True)
for pkg in packages.split():
pe = getpkgvar(pkg, 'PE') or "0"
pv = getpkgvar(pkg, 'PV')
pr = getpkgvar(pkg, 'PR')
#
# Find out what the last version was
# Make sure the version did not decrease
#
lastversion = getlastpkgversion(pkg)
if lastversion:
last_pe = lastversion.pe
last_pv = lastversion.pv
last_pr = lastversion.pr
r = bb.utils.vercmp((pe, pv, pr), (last_pe, last_pv, last_pr))
if r < 0:
bb.error("Package version for package %s went backwards which would break package feeds from (%s:%s-%s to %s:%s-%s)" % (pkg, last_pe, last_pv, last_pr, pe, pv, pr))
pkginfo = PackageInfo(pkg)
pkginfo.pe = pe
pkginfo.pv = pv
pkginfo.pr = pr
pkginfo.rdepends = squashspaces(getpkgvar(pkg, 'RDEPENDS') or "")
pkginfo.rrecommends = squashspaces(getpkgvar(pkg, 'RRECOMMENDS') or "")
pkginfo.files = squashspaces(getpkgvar(pkg, 'FILES') or "")
# Gather information about packaged files
pkgdestpkg = os.path.join(pkgdest, pkg)
filelist = []
pkginfo.size = 0
for root, dirs, files in os.walk(pkgdestpkg):
relpth = os.path.relpath(root, pkgdestpkg)
for f in files:
fstat = os.lstat(os.path.join(root, f))
pkginfo.size += fstat.st_size
filelist.append(os.sep + os.path.join(relpth, f))
pkginfo.filelist = " ".join(filelist)
write_pkghistory(pkginfo, d)
write_latestlink(pkg, pe, pv, pr, d)
}
def write_recipehistory(rcpinfo, d):
bb.debug(2, "Writing recipe history")
pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
if not os.path.exists(pkghistdir):
os.makedirs(pkghistdir)
verfile = os.path.join(pkghistdir, "%s:%s-%s" % (rcpinfo.pe, rcpinfo.pv, rcpinfo.pr))
f = open(verfile, "w")
try:
if rcpinfo.pe != "0":
f.write("PE = %s\n" % rcpinfo.pe)
f.write("PV = %s\n" % rcpinfo.pv)
f.write("PR = %s\n" % rcpinfo.pr)
f.write("DEPENDS = %s\n" % rcpinfo.depends)
f.write("PACKAGES = %s\n" % rcpinfo.packages)
finally:
f.close()
def write_pkghistory(pkginfo, d):
bb.debug(2, "Writing package history")
pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
verpath = os.path.join(pkghistdir, pkginfo.name)
if not os.path.exists(verpath):
os.makedirs(verpath)
verfile = os.path.join(verpath, "%s:%s-%s" % (pkginfo.pe, pkginfo.pv, pkginfo.pr))
f = open(verfile, "w")
try:
if pkginfo.pe != "0":
f.write("PE = %s\n" % pkginfo.pe)
f.write("PV = %s\n" % pkginfo.pv)
f.write("PR = %s\n" % pkginfo.pr)
f.write("RDEPENDS = %s\n" % pkginfo.rdepends)
f.write("RRECOMMENDS = %s\n" % pkginfo.rrecommends)
f.write("PKGSIZE = %d\n" % pkginfo.size)
f.write("FILES = %s\n" % pkginfo.files)
f.write("FILELIST = %s\n" % pkginfo.filelist)
finally:
f.close()
def write_latestlink(pkg, pe, pv, pr, d):
import shutil
pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
def rm_link(path):
try:
os.unlink(path)
except OSError:
return
if pkg:
filedir = os.path.join(pkghistdir, pkg)
else:
filedir = pkghistdir
latest_file = os.path.join(filedir, "latest")
ver_file = os.path.join(filedir, "%s:%s-%s" % (pe, pv, pr))
rm_link(latest_file)
if d.getVar('BUILDHISTORY_KEEP_VERSIONS', True) == '1':
shutil.copy(ver_file, latest_file)
else:
shutil.move(ver_file, latest_file)
buildhistory_get_image_installed() {
# Anything requiring the use of the packaging system should be done in here
# in case the packaging files are going to be removed for this image
mkdir -p ${BUILDHISTORY_DIR_IMAGE}
# Get list of installed packages
list_installed_packages | sort > ${BUILDHISTORY_DIR_IMAGE}/installed-package-names.txt
INSTALLED_PKGS=`cat ${BUILDHISTORY_DIR_IMAGE}/installed-package-names.txt`
# Produce installed package file and size lists and dependency graph
echo -n > ${BUILDHISTORY_DIR_IMAGE}/installed-packages.txt
echo -n > ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.tmp
echo -e "digraph depends {\n node [shape=plaintext]" > ${BUILDHISTORY_DIR_IMAGE}/depends.dot
for pkg in $INSTALLED_PKGS; do
pkgfile=`get_package_filename $pkg`
echo `basename $pkgfile` >> ${BUILDHISTORY_DIR_IMAGE}/installed-packages.txt
if [ -f $pkgfile ] ; then
pkgsize=`du -k $pkgfile | head -n1 | awk '{ print $1 }'`
echo $pkgsize $pkg >> ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.tmp
fi
deps=`list_package_depends $pkg`
for dep in $deps ; do
echo "$pkg OPP $dep;" | sed -e 's:-:_:g' -e 's:\.:_:g' -e 's:+::g' | sed 's:OPP:->:g' >> ${BUILDHISTORY_DIR_IMAGE}/depends.dot
done
recs=`list_package_recommends $pkg`
for rec in $recs ; do
echo "$pkg OPP $rec [style=dotted];" | sed -e 's:-:_:g' -e 's:\.:_:g' -e 's:+::g' | sed 's:OPP:->:g' >> ${BUILDHISTORY_DIR_IMAGE}/depends.dot
done
done
echo "}" >> ${BUILDHISTORY_DIR_IMAGE}/depends.dot
cat ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.tmp | sort -n -r | awk '{print $1 "\tKiB " $2}' > ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.txt
rm ${BUILDHISTORY_DIR_IMAGE}/installed-package-sizes.tmp
# Produce some cut-down graphs (for readability)
grep -v kernel_image ${BUILDHISTORY_DIR_IMAGE}/depends.dot | grep -v kernel_2 | grep -v kernel_3 > ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel.dot
grep -v libc6 ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel.dot | grep -v libgcc > ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc.dot
grep -v update_ ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc.dot > ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc-noupdate.dot
grep -v kernel_module ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc-noupdate.dot > ${BUILDHISTORY_DIR_IMAGE}/depends-nokernel-nolibc-noupdate-nomodules.dot
# Workaround for broken shell function dependencies
if false ; then
get_package_filename
list_package_depends
list_package_recommends
fi
}
buildhistory_get_imageinfo() {
# List the files in the image, but exclude date/time etc.
# This awk script is somewhat messy, but handles the case where the size is not printed for device files under pseudo
( cd ${IMAGE_ROOTFS} && find . -ls | awk '{ if ( $7 ~ /[0-9]/ ) printf "%s %10-s %10-s %10s %s %s %s\n", $3, $5, $6, $7, $11, $12, $13 ; else printf "%s %10-s %10-s %10s %s %s %s\n", $3, $5, $6, 0, $10, $11, $12 }' > ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt )
# Record some machine-readable meta-information about the image
echo -n > ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
cat >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt <<END
${@buildhistory_get_imagevars(d)}
END
imagesize=`du -ks ${IMAGE_ROOTFS} | awk '{ print $1 }'`
echo "IMAGESIZE = $imagesize" >> ${BUILDHISTORY_DIR_IMAGE}/image-info.txt
# Add some configuration information
echo "${MACHINE}: ${IMAGE_BASENAME} configured for ${DISTRO} ${DISTRO_VERSION}" > ${BUILDHISTORY_DIR_IMAGE}/build-id
cat >> ${BUILDHISTORY_DIR_IMAGE}/build-id <<END
${@buildhistory_get_layers(d)}
END
}
# By prepending we get in before the removal of packaging files
ROOTFS_POSTPROCESS_COMMAND =+ "buildhistory_get_image_installed ; "
IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
def buildhistory_get_layers(d):
layertext = "Configured metadata layers:\n%s\n" % '\n'.join(get_layers_branch_rev(d))
return layertext
def buildhistory_get_imagevars(d):
imagevars = "DISTRO DISTRO_VERSION USER_CLASSES IMAGE_CLASSES IMAGE_FEATURES IMAGE_LINGUAS IMAGE_INSTALL BAD_RECOMMENDATIONS ROOTFS_POSTPROCESS_COMMAND IMAGE_POSTPROCESS_COMMAND"
ret = ""
for var in imagevars.split():
value = d.getVar(var, True) or ""
ret += "%s = %s\n" % (var, value)
return ret.rstrip('\n')
buildhistory_commit() {
if [ ! -d ${BUILDHISTORY_DIR} ] ; then
# The code above that creates this dir never executed, so there can't be anything to commit
exit
fi
( cd ${BUILDHISTORY_DIR}/
# Initialise the repo if necessary
if [ ! -d .git ] ; then
git init -q
fi
# Ensure there are new/changed files to commit
repostatus=`git status --porcelain`
if [ "$repostatus" != "" ] ; then
git add ${BUILDHISTORY_DIR}/*
HOSTNAME=`cat /etc/hostname 2>/dev/null || echo unknown`
git commit ${BUILDHISTORY_DIR}/ -m "Build ${BUILDNAME} of ${DISTRO} ${DISTRO_VERSION} for machine ${MACHINE} on $HOSTNAME" --author "${BUILDHISTORY_COMMIT_AUTHOR}" > /dev/null
if [ "${BUILDHISTORY_PUSH_REPO}" != "" ] ; then
git push -q ${BUILDHISTORY_PUSH_REPO}
fi
fi) || true
}
python buildhistory_eventhandler() {
import bb.build
import bb.event
if isinstance(e, bb.event.BuildCompleted):
if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
bb.build.exec_func("buildhistory_commit", e.data)
}
addhandler buildhistory_eventhandler

View File

@@ -0,0 +1,281 @@
BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
BNFILE = "${BUILDSTATS_BASE}/.buildname"
DEVFILE = "${BUILDSTATS_BASE}/.device"
################################################################################
# Build statistics gathering.
#
# The CPU and Time gathering/tracking functions and bbevent inspiration
# were written by Christopher Larson and can be seen here:
# http://kergoth.pastey.net/142813
#
################################################################################
def get_process_cputime(pid):
fields = open("/proc/%d/stat" % pid, "r").readline().rstrip().split()
# 13: utime, 14: stime, 15: cutime, 16: cstime
return sum(int(field) for field in fields[13:17])
def get_cputime():
fields = open("/proc/stat", "r").readline().rstrip().split()[1:]
return sum(int(field) for field in fields)
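# For example, a /proc/stat first line of "cpu 10 0 20 300 5 0 1 0 0 0"
# sums to 336 jiffies across all states; deltas of this total between two
# samples form the denominator of the CPU-usage percentage in get_timedata().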
def set_bn(e):
bn = e.getPkgs()[0] + "-" + e.data.getVar('MACHINE', True)
try:
os.remove(e.data.getVar('BNFILE', True))
except:
pass
file = open(e.data.getVar('BNFILE', True), "w")
file.write(os.path.join(bn, e.data.getVar('BUILDNAME', True)))
file.close()
def get_bn(e):
file = open(e.data.getVar('BNFILE', True))
bn = file.readline()
file.close()
return bn
def set_device(e):
tmpdir = e.data.getVar('TMPDIR', True)
try:
os.remove(e.data.getVar('DEVFILE', True))
except:
pass
############################################################################
# We look for the volume TMPDIR lives on. To do all disks would make little
# sense and not give us any particularly useful data. In theory we could do
# something like stick DL_DIR on a different partition and this would
# throw stats gathering off. The same goes with SSTATE_DIR. However, let's
# get the basics in here and work on the corner cases later.
# A note: /proc/diskstats does not contain info on ecryptfs, tmpfs, etc.
# If we end up hitting one of these filesystems, we'll just skip diskstats collection.
############################################################################
device=os.stat(tmpdir)
majordev=os.major(device.st_dev)
minordev=os.minor(device.st_dev)
############################################################################
# Bug 1700:
# Because tmpfs/ecryptfs/ramfs etc. insert no entry in /proc/diskstats
# we set rdev to NoLogicalDevice and search for it later. If we find NLD
# we do not collect diskstats as the method to collect meaningful statistics
# for these fs types requires a bit more research.
############################################################################
rdev="NoLogicalDevice"
try:
for line in open("/proc/diskstats", "r"):
if majordev == int(line.split()[0]) and minordev == int(line.split()[1]):
rdev=line.split()[2]
except:
pass
file = open(e.data.getVar('DEVFILE', True), "w")
file.write(rdev)
file.close()
def get_device(e):
file = open(e.data.getVar('DEVFILE', True))
device = file.readline()
file.close()
return device
def get_diskstats(dev):
import itertools
############################################################################
# For info on what these are, see kernel doc file iostats.txt
############################################################################
DSTAT_KEYS = ['ReadsComp', 'ReadsMerged', 'SectRead', 'TimeReads', 'WritesComp', 'SectWrite', 'TimeWrite', 'IOinProgress', 'TimeIO', 'WTimeIO']
diskstats_val = None
try:
for x in open("/proc/diskstats", "r"):
if dev in x:
diskstats_val = x.rstrip().split()[4:]
except IOError as e:
return
# Guard against the device never appearing in /proc/diskstats
if not diskstats_val:
return
diskstats = dict(itertools.izip(DSTAT_KEYS, diskstats_val))
return diskstats
def set_diskdata(var, dev, data):
data.setVar(var, get_diskstats(dev))
def get_diskdata(var, dev, data):
olddiskdata = data.getVar(var, False)
diskdata = {}
if olddiskdata is None:
return
newdiskdata = get_diskstats(dev)
for key in olddiskdata.iterkeys():
diskdata["Start"+key] = str(int(olddiskdata[key]))
diskdata["End"+key] = str(int(newdiskdata[key]))
return diskdata
def set_timedata(var, data):
import time
time = time.time()
cputime = get_cputime()
proctime = get_process_cputime(os.getpid())
data.setVar(var, (time, cputime, proctime))
def get_timedata(var, data):
import time
timedata = data.getVar(var, False)
if timedata is None:
return
oldtime, oldcpu, oldproc = timedata
procdiff = get_process_cputime(os.getpid()) - oldproc
cpudiff = get_cputime() - oldcpu
timediff = time.time() - oldtime
if cpudiff > 0:
cpuperc = float(procdiff) * 100 / cpudiff
else:
cpuperc = None
return timediff, cpuperc
def write_task_data(status, logfile, dev, e):
import time
bn = get_bn(e)
bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
taskdir = os.path.join(bsdir, bb.data.expand("${PF}", e.data))
file = open(os.path.join(logfile), "a")
timedata = get_timedata("__timedata_task", e.data)
if timedata:
elapsedtime, cpu = timedata
file.write(bb.data.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
(e.task, elapsedtime), e.data))
if cpu:
file.write("CPU usage: %0.1f%% \n" % cpu)
############################################################################
# Here we gather up disk data. In an effort to avoid lying with stats
# I do a bare minimum of analysis of collected data.
# The simple fact is, doing disk I/O collection on a per-process basis
# without affecting build time would be difficult.
# For the best information, running things with BB_TOTAL_THREADS = "1"
# would return accurate per-task results.
############################################################################
if dev != "NoLogicalDevice":
diskdata = get_diskdata("__diskdata_task", dev, e.data)
if diskdata:
for key in sorted(diskdata.iterkeys()):
file.write(key + ": " + diskdata[key] + "\n")
if status == "passed":
file.write("Status: PASSED \n")
else:
file.write("Status: FAILED \n")
file.write("Ended: %0.2f \n" % time.time())
file.close()
python run_buildstats () {
import bb.build
import bb.event
import bb.data
import time, subprocess, platform
if isinstance(e, bb.event.BuildStarted):
########################################################################
# on the first pass, create the buildstats hierarchy and then
# set the build name
########################################################################
try:
bb.mkdirhier(e.data.getVar('BUILDSTATS_BASE', True))
except:
pass
set_bn(e)
bn = get_bn(e)
set_device(e)
device = get_device(e)
bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
try:
bb.mkdirhier(bsdir)
except:
pass
if device != "NoLogicalDevice":
set_diskdata("__diskdata_build", device, e.data)
set_timedata("__timedata_build", e.data)
build_time = os.path.join(bsdir, "build_stats")
# write start of build into build_time
file = open(build_time,"a")
host_info = platform.uname()
file.write("Host Info: ")
for x in host_info:
if x:
file.write(x + " ")
file.write("\n")
file.write("Build Started: %0.2f \n" % time.time())
file.close()
elif isinstance(e, bb.event.BuildCompleted):
bn = get_bn(e)
device = get_device(e)
bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
taskdir = os.path.join(bsdir, bb.data.expand("${PF}", e.data))
build_time = os.path.join(bsdir, "build_stats")
file = open(build_time, "a")
########################################################################
# Write build statistics for the build
########################################################################
timedata = get_timedata("__timedata_build", e.data)
if timedata:
            elapsed, cpu = timedata
            # write end of build and cpu usage into build_time; the file is
            # already open from above, so don't open a second handle
            file.write("Elapsed time: %0.2f seconds \n" % elapsed)
if cpu:
file.write("CPU usage: %0.1f%% \n" % cpu)
if device != "NoLogicalDevice":
diskio = get_diskdata("__diskdata_build", device, e.data)
if diskio:
for key in sorted(diskio.iterkeys()):
file.write(key + ": " + diskio[key] + "\n")
file.close()
if isinstance(e, bb.build.TaskStarted):
bn = get_bn(e)
device = get_device(e)
bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
taskdir = os.path.join(bsdir, bb.data.expand("${PF}", e.data))
if device != "NoLogicalDevice":
set_diskdata("__diskdata_task", device, e.data)
set_timedata("__timedata_task", e.data)
try:
bb.mkdirhier(taskdir)
except:
pass
# write into the task event file the name and start time
file = open(os.path.join(taskdir, e.task), "a")
file.write("Event: %s \n" % bb.event.getName(e))
file.write("Started: %0.2f \n" % time.time())
file.close()
elif isinstance(e, bb.build.TaskSucceeded):
bn = get_bn(e)
device = get_device(e)
bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
taskdir = os.path.join(bsdir, bb.data.expand("${PF}", e.data))
write_task_data("passed", os.path.join(taskdir, e.task), device, e)
if e.task == "do_rootfs":
bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
bs=os.path.join(bsdir, "build_stats")
file = open(bs,"a")
rootfs = e.data.getVar('IMAGE_ROOTFS', True)
rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
file.write("Uncompressed Rootfs size: %s" % rootfs_size)
file.close()
elif isinstance(e, bb.build.TaskFailed):
bn = get_bn(e)
device = get_device(e)
bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
taskdir = os.path.join(bsdir, bb.data.expand("${PF}", e.data))
write_task_data("failed", os.path.join(taskdir, e.task), device, e)
########################################################################
# Lets make things easier and tell people where the build failed in
# build_status. We do this here because BuildCompleted triggers no
# matter what the status of the build actually is
########################################################################
build_status = os.path.join(bsdir, "build_stats")
file = open(build_status,"a")
file.write(bb.data.expand("Failed at: ${PF} at task: %s \n" % e.task, e.data))
file.close()
}
addhandler run_buildstats
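# Illustrative usage sketch (not part of the original class): buildstats is
# normally enabled globally rather than inherited per recipe; the values
# below are assumptions for the example, not defaults shipped here.
#
#   # in local.conf:
#   INHERIT += "buildstats"
#   BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
#
# Per-build logs then land in ${BUILDSTATS_BASE}/<buildname>/build_stats and
# per-task logs in ${BUILDSTATS_BASE}/<buildname>/${PF}/<taskname>.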

View File

@@ -0,0 +1,112 @@
DEPENDS += " cmake-native "
# We need to unset CCACHE otherwise cmake gets too confused
CCACHE = ""
# We want the staging and installing functions from autotools
inherit autotools
# Use in-tree builds by default but allow this to be changed
# since some packages do not support them (e.g. llvm 2.5).
OECMAKE_SOURCEPATH ?= "."
# If declaring this, make sure you also set EXTRA_OEMAKE to
# "-C ${OECMAKE_BUILDPATH}". So it will run the right makefiles.
OECMAKE_BUILDPATH ?= ""
# C/C++ Compiler (without cpu arch/tune arguments)
OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
# Compiler flags
OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS}"
OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} -fpermissive"
OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CPPFLAGS} -DNDEBUG"
OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG"
OECMAKE_RPATH ?= ""
OECMAKE_PERLNATIVE_DIR ??= ""
OECMAKE_EXTRA_ROOT_PATH ?= ""
cmake_do_generate_toolchain_file() {
cat > ${WORKDIR}/toolchain.cmake <<EOF
# CMake system name must be something like "Linux".
# This is important for cross-compiling.
set( CMAKE_SYSTEM_NAME `echo ${SDK_OS} | sed 's/^./\u&/'` )
set( CMAKE_SYSTEM_PROCESSOR ${TARGET_ARCH} )
set( CMAKE_C_COMPILER ${OECMAKE_C_COMPILER} )
set( CMAKE_CXX_COMPILER ${OECMAKE_CXX_COMPILER} )
set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "CFLAGS for release" )
set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "CXXFLAGS for release" )
# only search in the paths provided so cmake doesn't pick
# up libraries and tools from the native build machine
set( CMAKE_FIND_ROOT_PATH ${STAGING_DIR_HOST} ${STAGING_DIR_NATIVE} ${CROSS_DIR} ${OECMAKE_PERLNATIVE_DIR} ${OECMAKE_EXTRA_ROOT_PATH} )
set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )
# Use qt.conf settings
set( ENV{QT_CONF_PATH} ${WORKDIR}/qt.conf )
# We need to set the rpath to the correct directory as cmake does not provide any
# directory as rpath by default
set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
# Use native cmake modules
set( CMAKE_MODULE_PATH ${STAGING_DATADIR}/cmake/Modules/ )
# add for non /usr/lib libdir, e.g. /usr/lib64
set( CMAKE_LIBRARY_PATH ${libdir} )
EOF
}
addtask generate_toolchain_file after do_patch before do_configure
cmake_do_configure() {
    if [ -n "${OECMAKE_BUILDPATH}" ]; then
        mkdir -p ${OECMAKE_BUILDPATH}
        cd ${OECMAKE_BUILDPATH}
    fi
    # Just like autotools, cmake can use a site file to cache results that need generated binaries to run
if [ -e ${WORKDIR}/site-file.cmake ] ; then
OECMAKE_SITEFILE=" -C ${WORKDIR}/site-file.cmake"
else
OECMAKE_SITEFILE=""
fi
cmake \
${OECMAKE_SITEFILE} \
${OECMAKE_SOURCEPATH} \
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
-DCMAKE_INSTALL_SO_NO_EXE=0 \
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
-DCMAKE_VERBOSE_MAKEFILE=1 \
${EXTRA_OECMAKE} \
-Wno-dev
}
cmake_do_compile() {
    if [ -n "${OECMAKE_BUILDPATH}" ]; then
        cd ${OECMAKE_BUILDPATH}
    fi
base_do_compile
}
cmake_do_install() {
    if [ -n "${OECMAKE_BUILDPATH}" ]; then
        cd ${OECMAKE_BUILDPATH}
    fi
autotools_do_install
}
EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
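# Illustrative recipe sketch (an assumption, not shipped with this class): a
# hypothetical package using an out-of-tree build, per the comments above.
#
#   inherit cmake
#   OECMAKE_BUILDPATH = "build"
#   EXTRA_OEMAKE = "-C ${OECMAKE_BUILDPATH}"
#   EXTRA_OECMAKE = "-DENABLE_TESTS=OFF"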

View File

@@ -0,0 +1,17 @@
cml1_do_configure() {
set -e
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake oldconfig
}
EXPORT_FUNCTIONS do_configure
addtask configure after do_unpack do_patch before do_compile
inherit terminal
python do_menuconfig() {
oe_terminal("make menuconfig", '${PN} Configuration', d)
}
do_menuconfig[nostamp] = "1"
addtask menuconfig after do_configure
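# Illustrative usage (an assumption): for a recipe with a kconfig-style build
# system that inherits this class, the configuration UI can be launched with:
#
#   bitbake -c menuconfig virtual/kernel
#
# do_menuconfig is nostamp, so it reruns every time it is requested.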

View File

@@ -0,0 +1,104 @@
# Deploy sources for recipes for compliance with copyleft-style licenses
# Defaults to using symlinks, as it's a quick operation, and one can easily
# follow the links when making use of the files (e.g. tar with the -h arg).
#
# By default, includes all GPL and LGPL, and excludes CLOSED and Proprietary.
#
# vi:sts=4:sw=4:et
COPYLEFT_SOURCES_DIR ?= '${DEPLOY_DIR}/copyleft_sources'
COPYLEFT_LICENSE_INCLUDE ?= 'GPL* LGPL*'
COPYLEFT_LICENSE_INCLUDE[type] = 'list'
COPYLEFT_LICENSE_INCLUDE[doc] = 'Space separated list of globs which include licenses'
COPYLEFT_LICENSE_EXCLUDE ?= 'CLOSED Proprietary'
COPYLEFT_LICENSE_EXCLUDE[type] = 'list'
COPYLEFT_LICENSE_EXCLUDE[doc] = 'Space separated list of globs which exclude licenses'
COPYLEFT_RECIPE_TYPE ?= '${@copyleft_recipe_type(d)}'
COPYLEFT_RECIPE_TYPE[doc] = 'The "type" of the current recipe (e.g. target, native, cross)'
COPYLEFT_RECIPE_TYPES ?= 'target'
COPYLEFT_RECIPE_TYPES[type] = 'list'
COPYLEFT_RECIPE_TYPES[doc] = 'Space separated list of recipe types to include'
COPYLEFT_AVAILABLE_RECIPE_TYPES = 'target native nativesdk cross crosssdk cross-canadian'
COPYLEFT_AVAILABLE_RECIPE_TYPES[type] = 'list'
COPYLEFT_AVAILABLE_RECIPE_TYPES[doc] = 'Space separated list of available recipe types'
def copyleft_recipe_type(d):
for recipe_type in oe.data.typed_value('COPYLEFT_AVAILABLE_RECIPE_TYPES', d):
if oe.utils.inherits(d, recipe_type):
return recipe_type
return 'target'
def copyleft_should_include(d):
"""Determine if this recipe's sources should be deployed for compliance"""
import ast
import oe.license
from fnmatch import fnmatchcase as fnmatch
recipe_type = d.getVar('COPYLEFT_RECIPE_TYPE', True)
if recipe_type not in oe.data.typed_value('COPYLEFT_RECIPE_TYPES', d):
return False, 'recipe type "%s" is excluded' % recipe_type
include = oe.data.typed_value('COPYLEFT_LICENSE_INCLUDE', d)
exclude = oe.data.typed_value('COPYLEFT_LICENSE_EXCLUDE', d)
try:
is_included, excluded = oe.license.is_included(d.getVar('LICENSE', True), include, exclude)
except oe.license.LicenseError as exc:
bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
else:
if is_included:
return True, None
else:
return False, 'recipe has excluded licenses: %s' % ', '.join(excluded)
python do_prepare_copyleft_sources () {
"""Populate a tree of the recipe sources and emit patch series files"""
import os.path
import shutil
p = d.getVar('P', True)
included, reason = copyleft_should_include(d)
if not included:
bb.debug(1, 'copyleft: %s is excluded: %s' % (p, reason))
return
else:
bb.debug(1, 'copyleft: %s is included' % p)
sources_dir = d.getVar('COPYLEFT_SOURCES_DIR', 1)
src_uri = d.getVar('SRC_URI', 1).split()
fetch = bb.fetch2.Fetch(src_uri, d)
ud = fetch.ud
    local_files = (fetch.localpath(url) for url in fetch.urls)
    localpaths = [local for local in local_files if not local.endswith('.bb')]
if not localpaths:
return
pf = d.getVar('PF', True)
dest = os.path.join(sources_dir, pf)
shutil.rmtree(dest, ignore_errors=True)
bb.mkdirhier(dest)
for path in localpaths:
os.symlink(path, os.path.join(dest, os.path.basename(path)))
patches = src_patches(d)
for patch in patches:
_, _, local, _, _, parm = bb.decodeurl(patch)
patchdir = parm.get('patchdir')
if patchdir:
series = os.path.join(dest, 'series.subdir.%s' % patchdir.replace('/', '_'))
else:
series = os.path.join(dest, 'series')
with open(series, 'a') as s:
s.write('%s -p%s\n' % (os.path.basename(local), parm['striplevel']))
}
addtask prepare_copyleft_sources after do_fetch before do_build
do_build[recrdeptask] += 'do_prepare_copyleft_sources'
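# Illustrative configuration sketch (the values are assumptions): enable from
# local.conf and widen the license globs:
#
#   INHERIT += "copyleft_compliance"
#   COPYLEFT_LICENSE_INCLUDE = "GPL* LGPL* AGPL*"
#   COPYLEFT_SOURCES_DIR = "${DEPLOY_DIR}/copyleft_sources"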

View File

@@ -0,0 +1,74 @@
# Common code for generating core reference images
#
# Copyright (C) 2007-2011 Linux Foundation
LIC_FILES_CHKSUM = "file://${COREBASE}/LICENSE;md5=3f40d7994397109285ec7b81fdeb3b58 \
file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420"
# IMAGE_FEATURES control content of the core reference images
#
# By default we install task-core-boot and task-base packages - this gives us
# a working (console-only) rootfs.
#
# Available IMAGE_FEATURES:
#
# - apps-console-core
# - x11-mini - minimal environment for X11 server
# - x11-base - X11 server + minimal desktop
# - x11-sato - OpenedHand Sato environment
# - x11-netbook - Metacity based environment for netbooks
# - apps-x11-core - X Terminal, file manager, file editor
# - apps-x11-games
# - apps-x11-pimlico - OpenedHand Pimlico apps
# - tools-sdk - SDK
# - tools-debug - debugging tools
# - tools-profile - profiling tools
# - tools-testapps - tools usable to make some device tests
# - nfs-server - NFS server (exports / over NFS to everybody)
# - ssh-server-dropbear - SSH server (dropbear)
# - ssh-server-openssh - SSH server (openssh)
# - debug-tweaks - makes an image suitable for development
#
PACKAGE_GROUP_apps-console-core = "task-core-apps-console"
PACKAGE_GROUP_x11-mini = "task-core-x11-mini"
PACKAGE_GROUP_x11-base = "task-core-x11-base"
PACKAGE_GROUP_x11-sato = "task-core-x11-sato"
PACKAGE_GROUP_x11-netbook = "task-core-x11-netbook"
PACKAGE_GROUP_apps-x11-core = "task-core-apps-x11-core"
PACKAGE_GROUP_apps-x11-games = "task-core-apps-x11-games"
PACKAGE_GROUP_apps-x11-pimlico = "task-core-apps-x11-pimlico"
PACKAGE_GROUP_tools-debug = "task-core-tools-debug"
PACKAGE_GROUP_tools-profile = "task-core-tools-profile"
PACKAGE_GROUP_tools-testapps = "task-core-tools-testapps"
PACKAGE_GROUP_tools-sdk = "task-core-sdk task-core-standalone-sdk-target"
PACKAGE_GROUP_nfs-server = "task-core-nfs-server"
PACKAGE_GROUP_ssh-server-dropbear = "task-core-ssh-dropbear"
PACKAGE_GROUP_ssh-server-openssh = "task-core-ssh-openssh"
PACKAGE_GROUP_package-management = "${ROOTFS_PKGMANAGE}"
PACKAGE_GROUP_qt4-pkgs = "task-core-qt-demos"
POKY_BASE_INSTALL = '\
task-core-boot \
task-base-extended \
\
${@base_contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)} \
\
${POKY_EXTRA_INSTALL} \
'
POKY_EXTRA_INSTALL ?= ""
IMAGE_INSTALL ?= "${POKY_BASE_INSTALL}"
X11_IMAGE_FEATURES = "x11-base apps-x11-core package-management"
ENHANCED_IMAGE_FEATURES = "${X11_IMAGE_FEATURES} apps-x11-games apps-x11-pimlico package-management"
SATO_IMAGE_FEATURES = "${ENHANCED_IMAGE_FEATURES} x11-sato ssh-server-dropbear"
inherit image
# Create /etc/timestamp during image construction to give a reasonably sane default time setting
ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
# Zap the root password if debug-tweaks feature is not enabled
ROOTFS_POSTPROCESS_COMMAND += '${@base_contains("IMAGE_FEATURES", "debug-tweaks", "", "zap_root_password ; ",d)}'
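# Illustrative image recipe sketch (the name and feature set are assumptions):
#
#   DESCRIPTION = "Console image with SSH and package management"
#   IMAGE_FEATURES += "ssh-server-dropbear package-management"
#   inherit core-image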

View File

@@ -0,0 +1,46 @@
#
# cpan-base provides various Perl-related information needed for building
# cpan modules
#
FILES_${PN} += "${libdir}/perl ${datadir}/perl"
DEPENDS += "${@["perl", "perl-native"][(bb.data.inherits_class('native', d))]}"
RDEPENDS += "${@["perl", ""][(bb.data.inherits_class('native', d))]}"
PERL_OWN_DIR = "${@["", "/perl-native"][(bb.data.inherits_class('native', d))]}"
# Determine the staged version of perl from the perl configuration file
def get_perl_version(d):
import re
cfg = bb.data.expand('${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh', d)
try:
f = open(cfg, 'r')
except IOError:
return None
    l = f.readlines()
    f.close()
r = re.compile("^version='(\d*\.\d*\.\d*)'")
for s in l:
m = r.match(s)
if m:
return m.group(1)
return None
# Determine where the library directories are
def perl_get_libdirs(d):
libdir = d.getVar('libdir', 1)
if is_target(d) == "no":
libdir += '/perl-native'
libdir += '/perl'
return libdir
def is_target(d):
if not bb.data.inherits_class('native', d):
return "yes"
return "no"
PERLLIBDIRS = "${@perl_get_libdirs(d)}"
FILES_${PN}-dbg += "${PERLLIBDIRS}/auto/*/.debug \
${PERLLIBDIRS}/auto/*/*/.debug \
${PERLLIBDIRS}/auto/*/*/*/.debug"

View File

@@ -0,0 +1,44 @@
#
# This is for perl modules that use the old Makefile.PL build system
#
inherit cpan-base perlnative
EXTRA_CPANFLAGS ?= ""
EXTRA_PERLFLAGS ?= ""
# Env var which tells perl if it should use host (no) or target (yes) settings
export PERLCONFIGTARGET = "${@is_target(d)}"
# Env var which tells perl where the perl include files are
export PERL_INC = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}/CORE"
export PERL_LIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
export PERL_ARCHLIB = "${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/${@get_perl_version(d)}"
export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
cpan_do_configure () {
export PERL5LIB="${PERL_ARCHLIB}"
yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL ${EXTRA_CPANFLAGS}
if [ "${BUILD_SYS}" != "${HOST_SYS}" ]; then
. ${STAGING_LIBDIR}${PERL_OWN_DIR}/perl/config.sh
# Use find since there can be a Makefile generated for each Makefile.PL
for f in `find -name Makefile.PL`; do
f2=`echo $f | sed -e 's/.PL//'`
sed -i -e "s:\(PERL_ARCHLIB = \).*:\1${PERL_ARCHLIB}:" \
-e 's/perl.real/perl/' \
$f2
done
fi
}
cpan_do_compile () {
oe_runmake PASTHRU_INC="${CFLAGS}" CCFLAGS="${CFLAGS}" LD="${CCLD}"
}
cpan_do_install () {
oe_runmake DESTDIR="${D}" install_vendor
for PERLSCRIPT in `grep -rIEl '#!${bindir}/perl-native.*/perl' ${D}`; do
sed -i -e 's|^#!${bindir}/perl-native.*/perl|#!/usr/bin/env nativeperl|' $PERLSCRIPT
done
}
EXPORT_FUNCTIONS do_configure do_compile do_install
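# Illustrative recipe sketch (module name and URI are placeholders, not a
# real recipe) for a Makefile.PL-based CPAN module:
#
#   SUMMARY = "Example CPAN module"
#   SRC_URI = "http://www.cpan.org/modules/by-module/Foo/Foo-Bar-${PV}.tar.gz"
#   inherit cpan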

View File

@@ -0,0 +1,49 @@
#
# This is for perl modules that use the new Build.PL build system
#
inherit cpan-base
#
# We also need to have built libmodule-build-perl-native for
# everything except libmodule-build-perl-native itself (which uses
# this class, but uses itself as the provider of
# libmodule-build-perl)
#
def cpan_build_dep_prepend(d):
if d.getVar('CPAN_BUILD_DEPS', 1):
return ''
pn = d.getVar('PN', 1)
if pn in ['libmodule-build-perl', 'libmodule-build-perl-native']:
return ''
return 'libmodule-build-perl-native '
DEPENDS_prepend = "${@cpan_build_dep_prepend(d)}"
cpan_build_do_configure () {
    if [ "${@is_target(d)}" = "yes" ]; then
# build for target
. ${STAGING_LIBDIR}/perl/config.sh
perl Build.PL --installdirs vendor \
--destdir ${D} \
--install_path lib="${datadir}/perl" \
--install_path arch="${libdir}/perl" \
--install_path script=${bindir} \
--install_path bin=${bindir} \
--install_path bindoc=${mandir}/man1 \
--install_path libdoc=${mandir}/man3
else
# build for host
perl Build.PL --installdirs site --destdir ${D}
fi
}
cpan_build_do_compile () {
perl Build
}
cpan_build_do_install () {
perl Build install
}
EXPORT_FUNCTIONS do_configure do_compile do_install

View File

@@ -0,0 +1,87 @@
#
# NOTE - When using this class the user is responsible for ensuring that
# TRANSLATED_TARGET_ARCH is added into PN. This ensures that if the TARGET_ARCH
# is changed, another nativesdk xxx-canadian-cross can be installed
#
# SDK packages are built either explicitly by the user,
# or indirectly via dependency. No need to be in 'world'.
EXCLUDE_FROM_WORLD = "1"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
#
# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
#
PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
python () {
archs = d.getVar('PACKAGE_ARCHS', True).split()
sdkarchs = []
for arch in archs:
sdkarchs.append(arch + '-nativesdk')
d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
}
MULTIMACH_TARGET_SYS = "${PACKAGE_ARCH}${HOST_VENDOR}-${HOST_OS}"
INHIBIT_DEFAULT_DEPS = "1"
STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_SYS}-nativesdk"
TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-nativesdk${HOST_VENDOR}-${HOST_OS}"
PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
PKGDATA_DIR = "${TMPDIR}/pkgdata/${HOST_ARCH}-nativesdk${HOST_VENDOR}-${HOST_OS}"
PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-nativesdk${HOST_VENDOR}-${HOST_OS}/"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
HOST_OS = "${SDK_OS}"
HOST_PREFIX = "${SDK_PREFIX}"
HOST_CC_ARCH = "${SDK_CC_ARCH}"
HOST_LD_ARCH = "${SDK_LD_ARCH}"
HOST_AS_ARCH = "${SDK_AS_ARCH}"
#assign DPKG_ARCH
DPKG_ARCH = "${SDK_ARCH}"
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
CXXFLAGS = "${BUILDSDK_CFLAGS}"
LDFLAGS = "${BUILDSDK_LDFLAGS}"
DEPENDS_GETTEXT = "gettext-native gettext-nativesdk"
# Path mangling needed by the cross packaging
# Note that we use := here to ensure that libdir and includedir are
# target paths.
target_libdir := "${libdir}"
target_includedir := "${includedir}"
target_base_libdir := "${base_libdir}"
target_prefix := "${prefix}"
target_exec_prefix := "${exec_prefix}"
# Change to place files in SDKPATH
base_prefix = "${SDKPATHNATIVE}"
prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
bindir = "${exec_prefix}/bin/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
sbindir = "${bindir}"
base_bindir = "${bindir}"
base_sbindir = "${bindir}"
libdir = "${exec_prefix}/lib/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
libexecdir = "${exec_prefix}/libexec/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
FILES_${PN} = "${prefix}"
FILES_${PN}-dbg += "${prefix}/.debug \
${prefix}/bin/.debug \
"
export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
# Cross-canadian packages need to pull in nativesdk dynamic libs
SHLIBSDIR = "${STAGING_DIR}/${SDK_ARCH}-nativesdk${SDK_VENDOR}-${BUILD_OS}/shlibs"
do_populate_sysroot[stamp-extra-info] = ""
do_package[stamp-extra-info] = ""

View File

@@ -0,0 +1,60 @@
inherit relocatable
# Cross packages are built indirectly via dependency,
# no need for them to be a direct target of 'world'
EXCLUDE_FROM_WORLD = "1"
PACKAGES = ""
PACKAGES_DYNAMIC = ""
PACKAGES_DYNAMIC_virtclass-native = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_VENDOR = "${BUILD_VENDOR}"
HOST_OS = "${BUILD_OS}"
HOST_PREFIX = "${BUILD_PREFIX}"
HOST_CC_ARCH = "${BUILD_CC_ARCH}"
HOST_LD_ARCH = "${BUILD_LD_ARCH}"
HOST_AS_ARCH = "${BUILD_AS_ARCH}"
STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
export PKG_CONFIG_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}${libdir}/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
CXXFLAGS = "${BUILD_CFLAGS}"
LDFLAGS = "${BUILD_LDFLAGS}"
LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
TOOLCHAIN_OPTIONS = ""
DEPENDS_GETTEXT = "gettext-native"
# Path mangling needed by the cross packaging
# Note that we use := here to ensure that libdir and includedir are
# target paths.
target_base_prefix := "${base_prefix}"
target_prefix := "${prefix}"
target_exec_prefix := "${exec_prefix}"
target_base_libdir = "${target_base_prefix}/${baselib}"
target_libdir = "${target_exec_prefix}/${baselib}"
target_includedir := "${includedir}"
# Overrides for paths
CROSS_TARGET_SYS_DIR = "${MULTIMACH_TARGET_SYS}"
prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
base_prefix = "${STAGING_DIR_NATIVE}"
exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
bindir = "${exec_prefix}/bin/${CROSS_TARGET_SYS_DIR}"
sbindir = "${bindir}"
base_bindir = "${bindir}"
base_sbindir = "${bindir}"
libdir = "${exec_prefix}/lib/${CROSS_TARGET_SYS_DIR}"
libexecdir = "${exec_prefix}/libexec/${CROSS_TARGET_SYS_DIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}"
do_install () {
oe_runmake 'DESTDIR=${D}' install
}

View File

@@ -0,0 +1,24 @@
inherit cross
PACKAGE_ARCH = "${SDK_ARCH}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-nativesdk${SDK_VENDOR}-${SDK_OS}"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
TARGET_ARCH = "${SDK_ARCH}"
TARGET_VENDOR = "${SDK_VENDOR}"
TARGET_OS = "${SDK_OS}"
TARGET_PREFIX = "${SDK_PREFIX}"
TARGET_CC_ARCH = "${SDK_CC_ARCH}"
TARGET_LD_ARCH = "${SDK_LD_ARCH}"
TARGET_AS_ARCH = "${SDK_AS_ARCH}"
TARGET_FPU = ""
target_libdir = "${SDKPATHNATIVE}${libdir_nativesdk}"
target_includedir = "${SDKPATHNATIVE}${includedir_nativesdk}"
target_base_libdir = "${SDKPATHNATIVE}${base_libdir_nativesdk}"
target_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
target_exec_prefix = "${SDKPATHNATIVE}${exec_prefix_nativesdk}"
baselib = "lib"
do_populate_sysroot[stamp-extra-info] = ""
do_package[stamp-extra-info] = ""

View File

@@ -0,0 +1,127 @@
# Debian package renaming only occurs when a package is built
# We therefore have to make sure we build all runtime packages
# before building the current package, so that the package's runtime
# dependencies are correct.
#
# Custom library package names can be defined setting
# DEBIANNAME_ + pkgname to the desired name.
#
# Better expressed as: ensure all RDEPENDS packages are built before we package.
# This means we can't have circular RDEPENDS/RRECOMMENDS
DEBIANRDEP = "do_package"
do_package_write_ipk[rdeptask] = "${DEBIANRDEP}"
do_package_write_deb[rdeptask] = "${DEBIANRDEP}"
do_package_write_tar[rdeptask] = "${DEBIANRDEP}"
do_package_write_rpm[rdeptask] = "${DEBIANRDEP}"
python () {
if not d.getVar("PACKAGES", True):
d.setVar("DEBIANRDEP", "")
}
python debian_package_name_hook () {
import glob, copy, stat, errno, re
pkgdest = d.getVar('PKGDEST', 1)
packages = d.getVar('PACKAGES', 1)
bin_re = re.compile(".*/s?" + os.path.basename(d.getVar("bindir", True)) + "$")
lib_re = re.compile(".*/" + os.path.basename(d.getVar("libdir", True)) + "$")
so_re = re.compile("lib.*\.so")
def socrunch(s):
s = s.lower().replace('_', '-')
m = re.match("^(.*)(.)\.so\.(.*)$", s)
if m is None:
return None
if m.group(2) in '0123456789':
bin = '%s%s-%s' % (m.group(1), m.group(2), m.group(3))
else:
bin = m.group(1) + m.group(2) + m.group(3)
dev = m.group(1) + m.group(2)
return (bin, dev)
def isexec(path):
try:
s = os.stat(path)
except (os.error, AttributeError):
return 0
return (s[stat.ST_MODE] & stat.S_IEXEC)
def auto_libname(packages, orig_pkg):
sonames = []
has_bins = 0
has_libs = 0
pkg_dir = os.path.join(pkgdest, orig_pkg)
for root, dirs, files in os.walk(pkg_dir):
if bin_re.match(root) and files:
has_bins = 1
if lib_re.match(root) and files:
has_libs = 1
for f in files:
if so_re.match(f):
fp = os.path.join(root, f)
cmd = (d.getVar('BUILD_PREFIX', 1) or "") + "objdump -p " + fp + " 2>/dev/null"
fd = os.popen(cmd)
lines = fd.readlines()
fd.close()
for l in lines:
m = re.match("\s+SONAME\s+([^\s]*)", l)
if m and not m.group(1) in sonames:
sonames.append(m.group(1))
bb.debug(1, 'LIBNAMES: pkg %s libs %d bins %d sonames %s' % (orig_pkg, has_libs, has_bins, sonames))
soname = None
if len(sonames) == 1:
soname = sonames[0]
elif len(sonames) > 1:
lead = d.getVar('LEAD_SONAME', 1)
if lead:
r = re.compile(lead)
filtered = []
for s in sonames:
if r.match(s):
filtered.append(s)
if len(filtered) == 1:
soname = filtered[0]
elif len(filtered) > 1:
bb.note("Multiple matches (%s) for LEAD_SONAME '%s'" % (", ".join(filtered), lead))
else:
bb.note("Multiple libraries (%s) found, but LEAD_SONAME '%s' doesn't match any of them" % (", ".join(sonames), lead))
else:
bb.note("Multiple libraries (%s) found and LEAD_SONAME not defined" % ", ".join(sonames))
if has_libs and not has_bins and soname:
soname_result = socrunch(soname)
if soname_result:
(pkgname, devname) = soname_result
for pkg in packages.split():
if (d.getVar('PKG_' + pkg) or d.getVar('DEBIAN_NOAUTONAME_' + pkg)):
continue
debian_pn = d.getVar('DEBIANNAME_' + pkg)
if debian_pn:
newpkg = debian_pn
elif pkg == orig_pkg:
newpkg = pkgname
else:
newpkg = pkg.replace(orig_pkg, devname, 1)
mlpre=d.getVar('MLPREFIX', True)
if mlpre:
if not newpkg.find(mlpre) == 0:
newpkg = mlpre + newpkg
if newpkg != pkg:
d.setVar('PKG_' + pkg, newpkg)
# reversed sort is needed when some package is substring of another
# ie in ncurses we get without reverse sort:
# DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libtic orig_pkg ncurses-libtic debian_pn None newpkg libtic5
# and later
# DEBUG: LIBNAMES: pkgname libtic5 devname libtic pkg ncurses-libticw orig_pkg ncurses-libtic debian_pn None newpkg libticw
# so we need to handle ncurses-libticw->libticw5 before ncurses-libtic->libtic5
for pkg in sorted((d.getVar('AUTO_LIBNAME_PKGS', 1) or "").split(), reverse=True):
auto_libname(packages, pkg)
}
EXPORT_FUNCTIONS package_name_hook
DEBIAN_NAMES = "1"

View File

@@ -0,0 +1,11 @@
DEPLOYDIR = "${WORKDIR}/deploy-${PN}"
SSTATETASKS += "do_deploy"
do_deploy[sstate-name] = "deploy"
do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
python do_deploy_setscene () {
sstate_setscene(d)
}
addtask do_deploy_setscene
do_deploy[dirs] = "${DEPLOYDIR} ${B}"

View File

@@ -0,0 +1,10 @@
inherit terminal
python do_devshell () {
oe_terminal(d.getVar('SHELL', True), 'OpenEmbedded Developer Shell', d)
}
addtask devshell after do_patch
do_devshell[dirs] = "${S}"
do_devshell[nostamp] = "1"

View File

@@ -0,0 +1,763 @@
require conf/distro/include/distro_tracking_fields.inc
addhandler distro_eventhandler
python distro_eventhandler() {
if bb.event.getName(e) == "BuildStarted":
import oe.distro_check as dc
logfile = dc.create_log_file(e.data, "distrodata.csv")
lf = bb.utils.lockfile("%s.lock" % logfile)
f = open(logfile, "a")
f.write("Package,Description,Owner,License,ChkSum,Status,VerMatch,Version,Upsteam,Non-Update,Reason,Recipe Status,Distro 1,Distro 2,Distro 3\n")
f.close()
bb.utils.unlockfile(lf)
return
}
addtask distrodata_np
do_distrodata_np[nostamp] = "1"
python do_distrodata_np() {
localdata = bb.data.createCopy(d)
pn = d.getVar("PN", True)
bb.note("Package Name: %s" % pn)
import oe.distro_check as dist_check
tmpdir = d.getVar('TMPDIR', True)
distro_check_dir = os.path.join(tmpdir, "distro_check")
datetime = localdata.getVar('DATETIME', True)
dist_check.update_distro_data(distro_check_dir, datetime)
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
bb.note("Native Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
if pn.find("-nativesdk") != -1:
pnstripped = pn.split("-nativesdk")
bb.note("Native Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
if pn.find("-cross") != -1:
pnstripped = pn.split("-cross")
bb.note("cross Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
if pn.find("-crosssdk") != -1:
pnstripped = pn.split("-crosssdk")
bb.note("cross Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
if pn.find("-initial") != -1:
pnstripped = pn.split("-initial")
bb.note("initial Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
"""generate package information from .bb file"""
pname = localdata.getVar('PN', True)
pcurver = localdata.getVar('PV', True)
pdesc = localdata.getVar('DESCRIPTION', True)
if pdesc is not None:
pdesc = pdesc.replace(',','')
pdesc = pdesc.replace('\n','')
pgrp = localdata.getVar('SECTION', True)
plicense = localdata.getVar('LICENSE', True).replace(',','_')
if localdata.getVar('LIC_FILES_CHKSUM', True):
pchksum="1"
else:
pchksum="0"
if localdata.getVar('RECIPE_STATUS', True):
hasrstatus="1"
else:
hasrstatus="0"
rstatus = localdata.getVar('RECIPE_STATUS', True)
if rstatus is not None:
rstatus = rstatus.replace(',','')
pupver = localdata.getVar('RECIPE_LATEST_VERSION', True)
if pcurver == pupver:
vermatch="1"
else:
vermatch="0"
noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
if noupdate_reason is None:
noupdate="0"
else:
noupdate="1"
noupdate_reason = noupdate_reason.replace(',','')
ris = localdata.getVar('RECIPE_INTEL_SECTION', True)
maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
rttr = localdata.getVar('RECIPE_TIME_BETWEEN_LAST_TWO_RELEASES', True)
rlrd = localdata.getVar('RECIPE_LATEST_RELEASE_DATE', True)
dc = localdata.getVar('DEPENDENCY_CHECK', True)
rc = localdata.getVar('RECIPE_COMMENTS', True)
result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
bb.note("DISTRO: %s,%s,%s,%s,%s,%s,%s,%s,%s, %s, %s, %s\n" % \
(pname, pdesc, maintainer, plicense, pchksum, hasrstatus, vermatch, pcurver, pupver, noupdate, noupdate_reason, rstatus))
line = pn
for i in result:
line = line + "," + i
bb.note("%s\n" % line)
}
addtask distrodata
do_distrodata[nostamp] = "1"
python do_distrodata() {
logpath = d.getVar('LOG_DIR', True)
bb.utils.mkdirhier(logpath)
logfile = os.path.join(logpath, "distrodata.csv")
import oe.distro_check as dist_check
localdata = bb.data.createCopy(d)
tmpdir = d.getVar('TMPDIR', True)
distro_check_dir = os.path.join(tmpdir, "distro_check")
datetime = localdata.getVar('DATETIME', True)
dist_check.update_distro_data(distro_check_dir, datetime)
pn = d.getVar("PN", True)
bb.note("Package Name: %s" % pn)
if pn.find("-native") != -1:
pnstripped = pn.split("-native")
bb.note("Native Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
if pn.find("-cross") != -1:
pnstripped = pn.split("-cross")
bb.note("cross Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
if pn.find("-initial") != -1:
pnstripped = pn.split("-initial")
bb.note("initial Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
"""generate package information from .bb file"""
pname = localdata.getVar('PN', True)
pcurver = localdata.getVar('PV', True)
pdesc = localdata.getVar('DESCRIPTION', True)
if pdesc is not None:
pdesc = pdesc.replace(',','')
pdesc = pdesc.replace('\n','')
pgrp = localdata.getVar('SECTION', True)
plicense = localdata.getVar('LICENSE', True).replace(',','_')
if localdata.getVar('LIC_FILES_CHKSUM', True):
pchksum="1"
else:
pchksum="0"
if localdata.getVar('RECIPE_STATUS', True):
hasrstatus="1"
else:
hasrstatus="0"
rstatus = localdata.getVar('RECIPE_STATUS', True)
if rstatus is not None:
rstatus = rstatus.replace(',','')
pupver = localdata.getVar('RECIPE_LATEST_VERSION', True)
if pcurver == pupver:
vermatch="1"
else:
vermatch="0"
noupdate_reason = localdata.getVar('RECIPE_NO_UPDATE_REASON', True)
if noupdate_reason is None:
noupdate="0"
else:
noupdate="1"
noupdate_reason = noupdate_reason.replace(',','')
ris = localdata.getVar('RECIPE_INTEL_SECTION', True)
maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
rttr = localdata.getVar('RECIPE_TIME_BETWEEN_LAST_TWO_RELEASES', True)
rlrd = localdata.getVar('RECIPE_LATEST_RELEASE_DATE', True)
dc = localdata.getVar('DEPENDENCY_CHECK', True)
rc = localdata.getVar('RECIPE_COMMENTS', True)
# do the comparison
result = dist_check.compare_in_distro_packages_list(distro_check_dir, localdata)
lf = bb.utils.lockfile("%s.lock" % logfile)
f = open(logfile, "a")
f.write("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" % \
(pname, pdesc, maintainer, plicense, pchksum, hasrstatus, vermatch, pcurver, pupver, noupdate, noupdate_reason, rstatus))
line = ""
for i in result:
line = line + "," + i
f.write(line + "\n")
f.close()
bb.utils.unlockfile(lf)
}
addtask distrodataall after do_distrodata
do_distrodataall[recrdeptask] = "do_distrodata"
do_distrodataall[nostamp] = "1"
do_distrodataall() {
:
}
addhandler checkpkg_eventhandler
python checkpkg_eventhandler() {
if bb.event.getName(e) == "BuildStarted":
import oe.distro_check as dc
logfile = dc.create_log_file(e.data, "checkpkg.csv")
lf = bb.utils.lockfile("%s.lock" % logfile)
f = open(logfile, "a")
f.write("Package\tVersion\tUpver\tLicense\tSection\tHome\tRelease\tPriority\tDepends\tBugTracker\tPE\tDescription\tStatus\tTracking\tURI\tMAINTAINER\n")
f.close()
bb.utils.unlockfile(lf)
return
}
addtask checkpkg
do_checkpkg[nostamp] = "1"
python do_checkpkg() {
localdata = bb.data.createCopy(d)
import sys
import re
import tempfile
"""
sanity check to ensure same name and type. Match as many patterns as possible
such as:
gnome-common-2.20.0.tar.gz (most common format)
gtk+-2.90.1.tar.gz
    xf86-input-synaptics-12.6.9.tar.gz
dri2proto-2.3.tar.gz
blktool_4.orig.tar.gz
libid3tag-0.15.1b.tar.gz
unzip552.tar.gz
icu4c-3_6-src.tgz
genext2fs_1.3.orig.tar.gz
gst-fluendo-mp3
"""
    prefix1 = "[a-zA-Z][a-zA-Z0-9]*([\-_][a-zA-Z]\w+)*[\-_]" # match most patterns which use "-" as separator before the version digits
prefix2 = "[a-zA-Z]+" # a loose pattern such as for unzip552.tar.gz
prefix3 = "[0-9a-zA-Z]+" # a loose pattern such as for 80325-quicky-0.4.tar.gz
prefix = "(%s|%s|%s)" % (prefix1, prefix2, prefix3)
suffix = "(tar\.gz|tgz|tar\.bz2|zip|xz|rpm)"
suffixtuple = ("tar.gz", "tgz", "zip", "tar.bz2", "tar.xz", "src.rpm")
sinterstr = "(?P<name>%s?)(?P<ver>.*)" % prefix
sdirstr = "(?P<name>%s)(?P<ver>.*)\.(?P<type>%s$)" % (prefix, suffix)
def parse_inter(s):
m = re.search(sinterstr, s)
if not m:
return None
else:
return (m.group('name'), m.group('ver'), "")
def parse_dir(s):
m = re.search(sdirstr, s)
if not m:
return None
else:
return (m.group('name'), m.group('ver'), m.group('type'))
"""
Check whether 'new' is newer than 'old' version. We use existing vercmp() for the
    purpose. PE is cleared in the comparison as it's not used for the build, and PR is
    cleared too for simplicity, as it's difficult to obtain from the various upstream formats
"""
def __vercmp(old, new):
(on, ov, ot) = old
(en, ev, et) = new
if on != en or (et and et not in suffixtuple):
return 0
ov = re.search("[\d|\.]+[^a-zA-Z]+", ov).group()
ev = re.search("[\d|\.]+[^a-zA-Z]+", ev).group()
return bb.utils.vercmp(("0", ov, ""), ("0", ev, ""))
"""
    Wrapper for fetching upstream directory info.
    'url' - upstream link customized by regular expression
    'd' - database
    'tmpf' - tmpfile for fetcher output
    We don't want to abort the whole build due to one recipe error, so handle all
    exceptions gracefully without leaking them to the caller.
"""
def internal_fetch_wget(url, d, tmpf):
status = "ErrFetchUnknown"
"""
Clear internal url cache as it's a temporary check. Not doing so will have
bitbake check url multiple times when looping through a single url
"""
fn = d.getVar('FILE', True)
bb.fetch2.urldata_cache[fn] = {}
"""
To avoid impacting bitbake build engine, this trick is required for reusing bitbake
    interfaces. bb.fetch.go() is not applicable as it checks downloaded content in ${DL_DIR}
while we don't want to pollute that place. So bb.fetch2.checkstatus() is borrowed here
which is designed for check purpose but we override check command for our own purpose
"""
ld = bb.data.createCopy(d)
d.setVar('CHECKCOMMAND_wget', "/usr/bin/env wget -t 1 --passive-ftp -O %s --user-agent=\"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12\" '${URI}'" \
% tmpf.name)
bb.data.update_data(ld)
try:
fetcher = bb.fetch2.Fetch([url], ld)
fetcher.checkstatus()
status = "SUCC"
except bb.fetch2.BBFetchException, e:
status = "ErrFetch"
return status
"""
Check on middle version directory such as "2.4/" in "http://xxx/2.4/pkg-2.4.1.tar.gz",
'url' - upstream link customized by regular expression
'd' - database
'curver' - current version
Return new version if success, or else error in "Errxxxx" style
"""
def check_new_dir(url, curver, d):
pn = d.getVar('PN', True)
f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-1-" % pn)
status = internal_fetch_wget(url, d, f)
fhtml = f.read()
if status == "SUCC" and len(fhtml):
newver = parse_inter(curver)
"""
match "*4.1/">*4.1/ where '*' matches chars
N.B. add package name, only match for digits
"""
m = re.search("^%s" % prefix, curver)
if m:
s = "%s[^\d\"]*?(\d+[\.\-_])+\d+/?" % m.group()
else:
s = "(\d+[\.\-_])+\d+/?"
searchstr = "[hH][rR][eE][fF]=\"%s\">" % s
reg = re.compile(searchstr)
valid = 0
for line in fhtml.split("\n"):
if line.find(curver) >= 0:
valid = 1
m = reg.search(line)
if m:
ver = m.group().split("\"")[1]
ver = ver.strip("/")
ver = parse_inter(ver)
if ver and __vercmp(newver, ver) < 0:
newver = ver
"""Expect a match for curver in directory list, or else it indicates unknown format"""
if not valid:
status = "ErrParseInterDir"
else:
"""rejoin the path name"""
status = newver[0] + newver[1]
elif not len(fhtml):
status = "ErrHostNoDir"
f.close()
if status != "ErrHostNoDir" and re.match("Err", status):
logpath = d.getVar('LOG_DIR', 1)
os.system("cp %s %s/" % (f.name, logpath))
os.unlink(f.name)
return status
"""
Check on the last directory to search '2.4.1' in "http://xxx/2.4/pkg-2.4.1.tar.gz",
'url' - upstream link customized by regular expression
'd' - database
'curname' - current package name
Return new version if success, or else error in "Errxxxx" style
"""
def check_new_version(url, curname, d):
"""possible to have no version in pkg name, such as spectrum-fw"""
if not re.search("\d+", curname):
return pcurver
pn = d.getVar('PN', True)
f = tempfile.NamedTemporaryFile(delete=False, prefix="%s-2-" % pn)
status = internal_fetch_wget(url, d, f)
fhtml = f.read()
if status == "SUCC" and len(fhtml):
newver = parse_dir(curname)
"""match "{PN}-5.21.1.tar.gz">{PN}-5.21.1.tar.gz """
pn1 = re.search("^%s" % prefix, curname).group()
s = "[^\"]*%s[^\d\"]*?(\d+[\.\-_])+[^\"]*" % pn1
searchstr = "[hH][rR][eE][fF]=\"%s\".*[>\"]" % s
reg = re.compile(searchstr)
valid = 0
for line in fhtml.split("\n"):
m = reg.search(line)
if m:
valid = 1
ver = m.group().split("\"")[1].split("/")[-1]
if ver == "download":
ver = m.group().split("\"")[1].split("/")[-2]
ver = parse_dir(ver)
if ver and __vercmp(newver, ver) < 0:
newver = ver
"""Expect a match for curver in directory list, or else it indicates unknown format"""
if not valid:
status = "ErrParseDir"
else:
"""newver still contains a full package name string"""
status = re.search("(\d+[\.\-_])*(\d+[0-9a-zA-Z]*)", newver[1]).group()
if "_" in status:
status = re.sub("_",".",status)
elif "-" in status:
status = re.sub("-",".",status)
elif not len(fhtml):
status = "ErrHostNoDir"
f.close()
"""if host hasn't directory information, no need to save tmp file"""
if status != "ErrHostNoDir" and re.match("Err", status):
logpath = d.getVar('LOG_DIR', True)
os.system("cp %s %s/" % (f.name, logpath))
os.unlink(f.name)
return status
"""first check whether a uri is provided"""
src_uri = d.getVar('SRC_URI', True)
if not src_uri:
return
"""initialize log files."""
logpath = d.getVar('LOG_DIR', True)
bb.utils.mkdirhier(logpath)
logfile = os.path.join(logpath, "checkpkg.csv")
"""generate package information from .bb file"""
pname = d.getVar('PN', True)
if pname.find("-native") != -1:
pnstripped = pname.split("-native")
bb.note("Native Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
if pname.find("-cross") != -1:
pnstripped = pname.split("-cross")
bb.note("cross Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
if pname.find("-initial") != -1:
pnstripped = pname.split("-initial")
bb.note("initial Split: %s" % pnstripped)
localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
bb.data.update_data(localdata)
pdesc = localdata.getVar('DESCRIPTION', True)
pgrp = localdata.getVar('SECTION', True)
pversion = localdata.getVar('PV', True)
plicense = localdata.getVar('LICENSE', True)
psection = localdata.getVar('SECTION', True)
phome = localdata.getVar('HOMEPAGE', True)
prelease = localdata.getVar('PR', True)
ppriority = localdata.getVar('PRIORITY', True)
pdepends = localdata.getVar('DEPENDS', True)
pbugtracker = localdata.getVar('BUGTRACKER', True)
ppe = localdata.getVar('PE', True)
psrcuri = localdata.getVar('SRC_URI', True)
maintainer = localdata.getVar('RECIPE_MAINTAINER', True)
found = 0
for uri in src_uri.split():
m = re.compile('(?P<type>[^:]*)').match(uri)
if not m:
raise MalformedUrl(uri)
elif m.group('type') in ('http', 'https', 'ftp', 'cvs', 'svn', 'git'):
found = 1
pproto = m.group('type')
break
if not found:
pproto = "file"
pupver = "N/A"
pstatus = "ErrUnknown"
(type, host, path, user, pswd, parm) = bb.decodeurl(uri)
if type in ['http', 'https', 'ftp']:
pcurver = d.getVar('PV', True)
else:
pcurver = d.getVar("SRCREV", True)
if type in ['http', 'https', 'ftp']:
newver = pcurver
altpath = path
dirver = "-"
curname = "-"
"""
match version number amid the path, such as "5.7" in:
http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
    N.B. what about something like "../5.7/5.8/..."? No such example found so far :-P
"""
m = re.search(r"[^/]*(\d+\.)+\d+([\-_]r\d+)*/", path)
if m:
altpath = path.split(m.group())[0]
dirver = m.group().strip("/")
"""use new path and remove param. for wget only param is md5sum"""
alturi = bb.encodeurl([type, host, altpath, user, pswd, {}])
newver = check_new_dir(alturi, dirver, d)
altpath = path
if not re.match("Err", newver) and dirver != newver:
altpath = altpath.replace(dirver, newver, True)
"""Now try to acquire all remote files in current directory"""
if not re.match("Err", newver):
curname = altpath.split("/")[-1]
"""get remote name by skipping pacakge name"""
m = re.search(r"/.*/", altpath)
if not m:
altpath = "/"
else:
altpath = m.group()
alturi = bb.encodeurl([type, host, altpath, user, pswd, {}])
newver = check_new_version(alturi, curname, d)
while(newver == "ErrHostNoDir"):
if alturi == "/download":
break
else:
alturi = "/".join(alturi.split("/")[0:-2]) + "/download"
newver = check_new_version(alturi, curname, d)
if not re.match("Err", newver):
pupver = newver
if pupver != pcurver:
pstatus = "UPDATE"
else:
pstatus = "MATCH"
if re.match("Err", newver):
pstatus = newver + ":" + altpath + ":" + dirver + ":" + curname
elif type == 'git':
if user:
gituser = user + '@'
else:
gituser = ""
if 'protocol' in parm:
gitproto = parm['protocol']
else:
gitproto = "git"
gitcmd = "git ls-remote %s://%s%s%s *tag* 2>&1" % (gitproto, gituser, host, path)
gitcmd2 = "git ls-remote %s://%s%s%s HEAD 2>&1" % (gitproto, gituser, host, path)
tmp = os.popen(gitcmd).read()
tmp2 = os.popen(gitcmd2).read()
        # This is for repos that have tags like refs/tags/1.2.2
if tmp:
tmpline = tmp.split("\n")
verflag = 0
for line in tmpline:
                if len(line) == 0:
                    break
                puptag = line.split("/")[-1]
                puptag = re.search("[0-9][0-9|\.|_]+[0-9]", puptag)
                if puptag is None:
                    continue
puptag = puptag.group()
puptag = re.sub("_",".",puptag)
plocaltag = pversion.split("+")[0]
if "git" in plocaltag:
plocaltag = plocaltag.split("-")[0]
result = bb.utils.vercmp(("0", puptag, ""), ("0", plocaltag, ""))
if result > 0:
verflag = 1
pstatus = "UPDATE"
pupver = puptag
                elif verflag == 0:
pupver = plocaltag
pstatus = "MATCH"
        # This is for repos without tags
elif tmp2:
pupver = tmp2.split("\t")[0]
if pupver in pversion:
pstatus = "MATCH"
else:
pstatus = "UPDATE"
else:
pstatus = "ErrGitAccess"
elif type == 'svn':
options = []
if user:
options.append("--username %s" % user)
if pswd:
options.append("--password %s" % pswd)
svnproto = 'svn'
if 'proto' in parm:
svnproto = parm['proto']
if 'rev' in parm:
pcurver = parm['rev']
svncmd = "svn info %s %s://%s%s/%s/ 2>&1" % (" ".join(options), svnproto, host, path, parm["module"])
        bb.note(svncmd)
svninfo = os.popen(svncmd).read()
for line in svninfo.split("\n"):
if re.search("^Last Changed Rev:", line):
pupver = line.split(" ")[-1]
if pupver in pversion:
pstatus = "MATCH"
else:
pstatus = "UPDATE"
if re.match("Err", pstatus):
pstatus = "ErrSvnAccess"
elif type == 'cvs':
pupver = "HEAD"
pstatus = "UPDATE"
elif type == 'file':
"""local file is always up-to-date"""
pupver = pcurver
pstatus = "MATCH"
else:
pstatus = "ErrUnsupportedProto"
if re.match("Err", pstatus):
pstatus += ":%s%s" % (host, path)
"""Read from manual distro tracking fields as alternative"""
pmver = d.getVar("RECIPE_LATEST_VERSION", True)
if not pmver:
pmver = "N/A"
pmstatus = "ErrNoRecipeData"
else:
if pmver == pcurver:
pmstatus = "MATCH"
else:
pmstatus = "UPDATE"
psrcuri = psrcuri.split()[0]
pdepends = "".join(pdepends.split("\t"))
pdesc = "".join(pdesc.split("\t"))
lf = bb.utils.lockfile("%s.lock" % logfile)
f = open(logfile, "a")
f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % \
(pname,pversion,pupver,plicense,psection, phome,prelease, ppriority,pdepends,pbugtracker,ppe,pdesc,pstatus,pmver,psrcuri,maintainer))
f.close()
bb.utils.unlockfile(lf)
}
addtask checkpkgall after do_checkpkg
do_checkpkgall[recrdeptask] = "do_checkpkg"
do_checkpkgall[nostamp] = "1"
do_checkpkgall() {
:
}
addhandler distro_check_eventhandler
python distro_check_eventhandler() {
if bb.event.getName(e) == "BuildStarted":
"""initialize log files."""
import oe.distro_check as dc
result_file = dc.create_log_file(e.data, "distrocheck.csv")
return
}
addtask distro_check
do_distro_check[nostamp] = "1"
python do_distro_check() {
"""checks if the package is present in other public Linux distros"""
import oe.distro_check as dc
import bb
import shutil
if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk',d):
return
localdata = bb.data.createCopy(d)
bb.data.update_data(localdata)
tmpdir = d.getVar('TMPDIR', True)
distro_check_dir = os.path.join(tmpdir, "distro_check")
logpath = d.getVar('LOG_DIR', True)
bb.utils.mkdirhier(logpath)
result_file = os.path.join(logpath, "distrocheck.csv")
datetime = localdata.getVar('DATETIME', True)
dc.update_distro_data(distro_check_dir, datetime)
# do the comparison
result = dc.compare_in_distro_packages_list(distro_check_dir, d)
# save the results
dc.save_distro_check_result(result, datetime, result_file, d)
}
addtask distro_checkall after do_distro_check
do_distro_checkall[recrdeptask] = "do_distro_check"
do_distro_checkall[nostamp] = "1"
do_distro_checkall() {
:
}
#
# Check for missing license text.
# Use this task to generate missing-license-text data for the pkg-report system,
# so we can find recipes whose license text doesn't exist in the common-licenses directory.
#
addhandler checklicense_eventhandler
python checklicense_eventhandler() {
if bb.event.getName(e) == "BuildStarted":
"""initialize log files."""
import oe.distro_check as dc
logfile = dc.create_log_file(e.data, "missinglicense.csv")
lf = bb.utils.lockfile("%s.lock" % logfile)
f = open(logfile, "a")
f.write("Package\tLicense\tMissingLicense\n")
f.close()
bb.utils.unlockfile(lf)
return
}
addtask checklicense
do_checklicense[nostamp] = "1"
python do_checklicense() {
import os
import bb
import shutil
logpath = d.getVar('LOG_DIR', True)
bb.utils.mkdirhier(logpath)
pn = d.getVar('PN', True)
logfile = os.path.join(logpath, "missinglicense.csv")
generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
license_types = d.getVar('LICENSE', True)
for license_type in ((license_types.replace('+', '').replace('|', '&')
.replace('(', '').replace(')', '').replace(';', '')
.replace(',', '').replace(" ", "").split("&"))):
if not os.path.isfile(os.path.join(generic_directory, license_type)):
lf = bb.utils.lockfile("%s.lock" % logfile)
f = open(logfile, "a")
f.write("%s\t%s\t%s\n" % \
(pn,license_types,license_type))
f.close()
bb.utils.unlockfile(lf)
return
}
addtask checklicenseall after do_checklicense
do_checklicenseall[recrdeptask] = "do_checklicense"
do_checklicenseall[nostamp] = "1"
do_checklicenseall() {
:
}
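# Illustrative usage sketch (an assumption): these tasks are typically driven
# manually after enabling the class, e.g.:
#
#   INHERIT += "distrodata"      # in local.conf
#   bitbake -c checkpkg world    # upstream version check, writes checkpkg.csv
#   bitbake -c distrodata world  # recipe metadata dump, writes distrodata.csv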

View File

@@ -0,0 +1,5 @@
DEPENDS += "${@["python-native python", ""][(d.getVar('PACKAGES', 1) == '')]}"
RDEPENDS_${PN} += "${@['', 'python-core']['${PN}' == '${BPN}']}"
inherit distutils-common-base

View File

@@ -0,0 +1,21 @@
inherit python-dir
EXTRA_OEMAKE = ""
export STAGING_INCDIR
export STAGING_LIBDIR
PACKAGES = "${PN}-dev ${PN}-dbg ${PN}-doc ${PN}"
FILES_${PN} = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
FILES_${PN}-dev += "\
${datadir}/pkgconfig \
${libdir}/pkgconfig \
${PYTHON_SITEPACKAGES_DIR}/*.la \
"
FILES_${PN}-dbg += "\
${PYTHON_SITEPACKAGES_DIR}/.debug \
${PYTHON_SITEPACKAGES_DIR}/*/.debug \
${PYTHON_SITEPACKAGES_DIR}/*/*/.debug \
"

View File

@@ -0,0 +1,3 @@
DEPENDS += "${@["python-native", ""][(d.getVar('PACKAGES', 1) == '')]}"
inherit distutils-common-base

View File

@@ -0,0 +1,74 @@
inherit distutils-base
DISTUTILS_BUILD_ARGS ?= ""
DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
--install-data=${STAGING_DATADIR}"
DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
--install-data=${D}/${datadir}"
distutils_do_compile() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/python setup.py build ${DISTUTILS_BUILD_ARGS} || \
bbfatal "python setup.py build_ext execution failed."
}
distutils_stage_headers() {
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/python setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
bbfatal "python setup.py install_headers execution failed."
}
distutils_stage_all() {
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/python setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
bbfatal "python setup.py install (stage) execution failed."
}
distutils_do_install() {
install -d ${D}${PYTHON_SITEPACKAGES_DIR}
STAGING_INCDIR=${STAGING_INCDIR} \
STAGING_LIBDIR=${STAGING_LIBDIR} \
PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
${STAGING_BINDIR_NATIVE}/python setup.py install ${DISTUTILS_INSTALL_ARGS} || \
bbfatal "python setup.py install execution failed."
for i in `find ${D} -name "*.py"` ; do \
sed -i -e s:${D}::g $i
done
if test -e ${D}${bindir} ; then
for i in ${D}${bindir}/* ; do \
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
done
fi
if test -e ${D}${sbindir}; then
for i in ${D}${sbindir}/* ; do \
sed -i -e s:${STAGING_BINDIR_NATIVE}:${bindir}:g $i
done
fi
rm -f ${D}${PYTHON_SITEPACKAGES_DIR}/easy-install.pth
#
# FIXME: Bandaid against wrong datadir computation
#
if test -e ${D}${datadir}/share; then
mv -f ${D}${datadir}/share/* ${D}${datadir}/
fi
# These are generated files, on really slow systems the storage/speed trade off
# might be worth it, but in general it isn't
find ${D}${libdir}/${PYTHON_DIR}/site-packages -iname '*.pyo' -exec rm {} \;
}
EXPORT_FUNCTIONS do_compile do_install
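# Illustrative recipe sketch (the name and URI are placeholders) for a
# setup.py-based Python module:
#
#   SUMMARY = "Example Python module"
#   SRC_URI = "http://example.org/example-${PV}.tar.gz"
#   inherit distutils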

View File

@@ -0,0 +1,2 @@
# An empty bbclass to facilitate dynamic inherit, include,
# and require statements.

View File

@@ -0,0 +1,60 @@
DEPENDS += "gconf gconf-native"
# This is referenced by the gconf m4 macros and would default to the value hardcoded
# into gconf at compile time otherwise
export GCONF_SCHEMA_INSTALL_SOURCE = "xml:merged:${STAGING_DIR_NATIVE}${sysconfdir}/gconf/gconf.xml.defaults"
export GCONF_BACKEND_DIR = "${STAGING_LIBDIR_NATIVE}/GConf/2"
gconf_postinst() {
if [ "x$D" != "x" ]; then
exit 1
fi
SCHEMA_LOCATION=/etc/gconf/schemas
for SCHEMA in ${SCHEMA_FILES}; do
if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
gconftool-2 \
--makefile-install-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
fi
done
}
gconf_prerm() {
SCHEMA_LOCATION=/etc/gconf/schemas
for SCHEMA in ${SCHEMA_FILES}; do
if [ -e $SCHEMA_LOCATION/$SCHEMA ]; then
HOME=/root GCONF_CONFIG_SOURCE=`gconftool-2 --get-default-source` \
gconftool-2 \
--makefile-uninstall-rule $SCHEMA_LOCATION/$SCHEMA > /dev/null
fi
done
}
python populate_packages_append () {
import re
packages = d.getVar('PACKAGES', 1).split()
pkgdest = d.getVar('PKGDEST', 1)
for pkg in packages:
schema_dir = '%s/%s/etc/gconf/schemas' % (pkgdest, pkg)
schemas = []
schema_re = re.compile(".*\.schemas$")
if os.path.exists(schema_dir):
for f in os.listdir(schema_dir):
if schema_re.match(f):
schemas.append(f)
if schemas != []:
bb.note("adding gconf postinst and prerm scripts to %s" % pkg)
d.setVar('SCHEMA_FILES', " ".join(schemas))
postinst = d.getVar('pkg_postinst_%s' % pkg, 1) or d.getVar('pkg_postinst', 1)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gconf_postinst', 1)
d.setVar('pkg_postinst_%s' % pkg, postinst)
prerm = d.getVar('pkg_prerm_%s' % pkg, 1) or d.getVar('pkg_prerm', 1)
if not prerm:
prerm = '#!/bin/sh\n'
prerm += d.getVar('gconf_prerm', 1)
d.setVar('pkg_prerm_%s' % pkg, prerm)
}

View File

@@ -0,0 +1,21 @@
def gettext_dependencies(d):
if d.getVar('USE_NLS', True) == 'no' and not oe.utils.inherits(d, 'native', 'nativesdk', 'cross'):
return ""
if d.getVar('INHIBIT_DEFAULT_DEPS', True) and not oe.utils.inherits(d, 'cross-canadian'):
return ""
if oe.utils.inherits(d, 'native', 'cross'):
return "gettext-minimal-native"
return d.getVar('DEPENDS_GETTEXT', False)
def gettext_oeconf(d):
if oe.utils.inherits(d, 'native', 'cross'):
return '--disable-nls'
# Remove the NLS bits if USE_NLS is no.
if d.getVar('USE_NLS', True) == 'no' and not oe.utils.inherits(d, 'nativesdk', 'cross-canadian'):
return '--disable-nls'
return "--enable-nls"
DEPENDS_GETTEXT = "virtual/gettext gettext-native"
BASEDEPENDS =+ "${@gettext_dependencies(d)}"
EXTRA_OECONF_append = " ${@gettext_oeconf(d)}"

View File

@@ -0,0 +1,3 @@
inherit gnomebase gtk-icon-cache gconf mime
EXTRA_OECONF += "--enable-introspection=no"

View File

@@ -0,0 +1,30 @@
def gnome_verdir(v):
import re
m = re.match("^([0-9]+)\.([0-9]+)", v)
return "%s.%s" % (m.group(1), m.group(2))
SECTION ?= "x11/gnome"
SRC_URI = "${GNOME_MIRROR}/${BPN}/${@gnome_verdir("${PV}")}/${BPN}-${PV}.tar.bz2;name=archive"
DEPENDS += "gnome-common"
FILES_${PN} += "${datadir}/application-registry \
${datadir}/mime-info \
${datadir}/mime/packages \
${datadir}/mime/application \
${datadir}/gnome-2.0 \
${datadir}/polkit* \
${datadir}/GConf \
${datadir}/glib-2.0/schemas \
"
FILES_${PN}-doc += "${datadir}/devhelp"
inherit autotools pkgconfig
do_install_append() {
rm -rf ${D}${localstatedir}/lib/scrollkeeper/*
rm -rf ${D}${localstatedir}/scrollkeeper/*
rm -f ${D}${datadir}/applications/*.cache
}

View File

@@ -0,0 +1,126 @@
# grub-efi.bbclass
# Copyright (c) 2011, Intel Corporation.
# All rights reserved.
#
# Released under the MIT license (see packages/COPYING)
# Provide grub-efi specific functions for building bootable images.
# External variables
# ${INITRD} - indicates a filesystem image to use as an initrd (optional)
# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
# ${LABELS} - a list of targets for the automatic config
# ${APPEND} - an override list of append strings for each label
# ${GRUB_OPTS} - additional options to add to the config, ';' delimited (optional)
# ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
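# Illustrative settings (hypothetical values, not defaults of this class):
#   LABELS = "boot install"
#   APPEND = "console=ttyS0,115200"
#   GRUB_TIMEOUT = "5"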
do_bootimg[depends] += "grub-efi-${TARGET_ARCH}-native:do_deploy"
GRUBCFG = "${S}/grub.cfg"
GRUB_TIMEOUT ?= "10"
#FIXME: build this from the machine config
GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
EFIDIR = "/EFI/BOOT"
GRUB_HDDDIR = "${HDDDIR}${EFIDIR}"
GRUB_ISODIR = "${ISODIR}${EFIDIR}"
grubefi_populate() {
DEST=$1
install -d ${DEST}
install -m 0644 ${STAGING_DIR_HOST}/kernel/bzImage ${DEST}/vmlinuz
if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
install -m 0644 ${INITRD} ${DEST}/initrd
fi
if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
install -m 0644 ${ROOTFS} ${DEST}/rootfs.img
fi
GRUB_IMAGE="bootia32.efi"
if [ "${TARGET_ARCH}" = "x86_64" ]; then
GRUB_IMAGE="bootx64.efi"
fi
install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}
install -m 0644 ${GRUBCFG} ${DEST}
}
grubefi_iso_populate() {
grubefi_populate ${GRUB_ISODIR}
}
grubefi_hddimg_populate() {
grubefi_populate ${GRUB_HDDDIR}
}
python build_grub_cfg() {
import sys
workdir = d.getVar('WORKDIR', True)
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
labels = d.getVar('LABELS', True)
if not labels:
bb.debug(1, "LABELS not defined, nothing to do")
return
if labels == []:
bb.debug(1, "No labels, nothing to do")
return
cfile = d.getVar('GRUBCFG', True)
if not cfile:
raise bb.build.FuncFailed('Unable to read GRUBCFG')
try:
cfgfile = file(cfile, 'w')
except IOError:
raise bb.build.FuncFailed('Unable to open %s' % (cfile))
cfgfile.write('# Automatically created by OE\n')
opts = d.getVar('GRUB_OPTS', True)
if opts:
for opt in opts.split(';'):
cfgfile.write('%s\n' % opt)
cfgfile.write('default=%s\n' % (labels.split()[0]))
timeout = d.getVar('GRUB_TIMEOUT', True)
if timeout:
cfgfile.write('timeout=%s\n' % timeout)
else:
cfgfile.write('timeout=50\n')
for label in labels.split():
localdata = d.createCopy()
overrides = localdata.getVar('OVERRIDES', True)
if not overrides:
raise bb.build.FuncFailed('OVERRIDES not defined')
localdata.setVar('OVERRIDES', label + ':' + overrides)
bb.data.update_data(localdata)
cfgfile.write('\nmenuentry \'%s\'{\n' % (label))
cfgfile.write('linux ${EFIDIR}/vmlinuz LABEL=%s' % (label))
append = localdata.getVar('APPEND', True)
initrd = localdata.getVar('INITRD', True)
if append:
cfgfile.write('%s' % (append))
cfgfile.write('\n')
if initrd:
cfgfile.write('initrd ${EFIDIR}/initrd')
cfgfile.write('\n}\n')
cfgfile.close()
}
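# For reference, a grub.cfg generated by build_grub_cfg for LABELS = "boot",
# APPEND = "root=/dev/ram0" and INITRD set looks roughly like this (illustrative):
#   # Automatically created by OE
#   serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
#   default=boot
#   timeout=10
#   menuentry 'boot'{
#   linux /EFI/BOOT/vmlinuz LABEL=boot root=/dev/ram0
#   initrd /EFI/BOOT/initrd
#   }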

View File

@@ -0,0 +1,4 @@
# We don't have gtk-doc so disable it
do_configure_prepend() {
echo "EXTRA_DIST=">> ${S}/gtk-doc.make
}

View File

@@ -0,0 +1,58 @@
FILES_${PN} += "${datadir}/icons/hicolor"
DEPENDS += "${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']}"
# This could run on the host as icon cache files are architecture independent,
# but there is no gtk-update-icon-cache built natively.
gtk_icon_cache_postinst() {
if [ "x$D" != "x" ]; then
exit 1
fi
# Update the pixbuf loaders in case they haven't been registered yet
GDK_PIXBUF_MODULEDIR=${libdir}/gdk-pixbuf-2.0/2.10.0/loaders gdk-pixbuf-query-loaders --update-cache
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
gtk-update-icon-cache -fqt $icondir
fi
done
}
gtk_icon_cache_postrm() {
for icondir in /usr/share/icons/* ; do
if [ -d $icondir ] ; then
gtk-update-icon-cache -qt $icondir
fi
done
}
python populate_packages_append () {
packages = d.getVar('PACKAGES', 1).split()
pkgdest = d.getVar('PKGDEST', 1)
for pkg in packages:
icon_dir = '%s/%s/%s/icons' % (pkgdest, pkg, d.getVar('datadir', 1))
if not os.path.exists(icon_dir):
continue
bb.note("adding hicolor-icon-theme dependency to %s" % pkg)
rdepends = d.getVar('RDEPENDS_%s' % pkg, 1)
rdepends = rdepends + ' ' + d.getVar('MLPREFIX') + "hicolor-icon-theme"
d.setVar('RDEPENDS_%s' % pkg, rdepends)
bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg)
postinst = d.getVar('pkg_postinst_%s' % pkg, 1) or d.getVar('pkg_postinst', 1)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('gtk_icon_cache_postinst', 1)
d.setVar('pkg_postinst_%s' % pkg, postinst)
postrm = d.getVar('pkg_postrm_%s' % pkg, 1) or d.getVar('pkg_postrm', 1)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('gtk_icon_cache_postrm', 1)
d.setVar('pkg_postrm_%s' % pkg, postrm)
}

View File

@@ -0,0 +1,220 @@
# IceCream distributed compiling support
#
# Stages directories with symlinks from gcc/g++ to icecc, for both
# native and cross compilers. Depending on each configure or compile,
# the directories are added at the head of the PATH list and ICECC_CXX
# and ICEC_CC are set.
#
# For the cross compiler, creates a tar.gz of our toolchain and sets
# ICECC_VERSION accordingly.
#
#The class now handles all 3 different compile 'stages' (i.e. native, cross-kernel and target), creating the
#necessary environment tar.gz file to be used by the remote machines.
#It also supports meta-toolchain generation.
#
#If ICECC_PATH is not set in local.conf then the class will try to locate it using 'which',
#but nothing is guaranteed ;)
#
#If ICECC_ENV_EXEC is set in local.conf, it should point to the icecc-create-env script provided by the user,
#or the default one provided by icecc-create-env.bb will be used.
#(NOTE that a modified version of the script is needed, *not* the one that comes with icecc.)
#
#Users can specify that specific packages, or packages belonging to a class, should not use icecc to
#distribute compile jobs to remote machines but be handled locally, by defining ICECC_USER_CLASS_BL and
#ICECC_USER_PACKAGE_BL with the appropriate values in local.conf.
#########################################################################################
#Error checking is kept to a minimum, so double check any parameters you pass to the class
#########################################################################################
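#Illustrative local.conf sketch (values hypothetical; assumes this class file is named icecc.bbclass):
#   INHERIT += "icecc"
#   ICECC_PATH = "/usr/bin/icecc"
#   ICECC_PARALLEL_MAKE = "-j 16"
#   ICECC_USER_PACKAGE_BL = "qemu"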
ICECC_ENV_EXEC ?= "${STAGING_BINDIR_NATIVE}/icecc-create-env"
def icecc_dep_prepend(d):
# INHIBIT_DEFAULT_DEPS doesn't apply to the patch command. Whether or not
# we need that built is the responsibility of the patch function / class, not
# the application.
if not d.getVar('INHIBIT_DEFAULT_DEPS'):
return "icecc-create-env-native"
return ""
DEPENDS_prepend += "${@icecc_dep_prepend(d)} "
def get_cross_kernel_cc(bb,d):
kernel_cc = bb.data.expand('${KERNEL_CC}', d)
kernel_cc = kernel_cc.replace('ccache', '').strip()
kernel_cc = kernel_cc.split(' ')[0]
kernel_cc = kernel_cc.strip()
return kernel_cc
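# Worked example (illustrative): with KERNEL_CC = "ccache arm-poky-linux-gnueabi-gcc",
# get_cross_kernel_cc() strips the ccache prefix and returns "arm-poky-linux-gnueabi-gcc".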
def create_path(compilers, bb, d):
"""
Create symlinks to icecc in the staging directory
"""
staging = os.path.join(bb.data.expand('${STAGING_BINDIR}', d), "ice")
if icc_is_kernel(bb, d):
staging += "-kernel"
#check if the icecc path is set by the user
icecc = d.getVar('ICECC_PATH') or os.popen("which icecc").read()[:-1]
# Create the dir if necessary
try:
os.stat(staging)
except:
try:
os.makedirs(staging)
except:
pass
for compiler in compilers:
gcc_path = os.path.join(staging, compiler)
try:
os.stat(gcc_path)
except:
try:
os.symlink(icecc, gcc_path)
except:
pass
return staging
def use_icc(bb,d):
package_tmp = bb.data.expand('${PN}', d)
system_class_blacklist = [ "none" ]
user_class_blacklist = (d.getVar('ICECC_USER_CLASS_BL') or "none").split()
package_class_blacklist = system_class_blacklist + user_class_blacklist
for black in package_class_blacklist:
if bb.data.inherits_class(black, d):
#bb.note(package_tmp, ' class ', black, ' found in blacklist, disable icecc')
return "no"
#"system" package blacklist contains a list of packages that can not distribute compile tasks
#for one reason or the other
system_package_blacklist = [ "uclibc", "glibc", "gcc", "bind", "u-boot", "dhcp-forwarder", "enchant", "connman", "orbit2" ]
user_package_blacklist = (d.getVar('ICECC_USER_PACKAGE_BL') or "").split()
package_blacklist = system_package_blacklist + user_package_blacklist
for black in package_blacklist:
if black in package_tmp:
#bb.note(package_tmp, ' found in blacklist, disable icecc')
return "no"
if d.getVar('PARALLEL_MAKE') == "":
bb.note(package_tmp, " ", bb.data.expand('${PV}', d), " has empty PARALLEL_MAKE, disable icecc")
return "no"
return "yes"
def icc_is_kernel(bb, d):
return bb.data.inherits_class("kernel", d)
def icc_is_native(bb, d):
return bb.data.inherits_class("cross", d) or \
bb.data.inherits_class("native", d)
def icc_version(bb, d):
if use_icc(bb, d) == "no":
return ""
parallel = d.getVar('ICECC_PARALLEL_MAKE') or ""
d.setVar("PARALLEL_MAKE", parallel)
if icc_is_native(bb, d):
archive_name = "local-host-env"
elif bb.data.expand('${HOST_PREFIX}', d) == "":
bb.fatal(bb.data.expand("${PN}", d), " NULL prefix")
else:
prefix = bb.data.expand('${HOST_PREFIX}' , d)
distro = bb.data.expand('${DISTRO}', d)
target_sys = bb.data.expand('${TARGET_SYS}', d)
float = d.getVar('TARGET_FPU') or "hard"
archive_name = prefix + distro + "-" + target_sys + "-" + float
if icc_is_kernel(bb, d):
archive_name += "-kernel"
import socket
ice_dir = bb.data.expand('${STAGING_DIR_NATIVE}${prefix_native}', d)
tar_file = os.path.join(ice_dir, 'ice', archive_name + "-@VERSION@-" + socket.gethostname() + '.tar.gz')
return tar_file
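# Illustrative result: with HOST_PREFIX = "arm-poky-linux-gnueabi-", DISTRO = "poky",
# TARGET_SYS = "arm-poky-linux-gnueabi" and TARGET_FPU = "hard", the archive becomes
# ${STAGING_DIR_NATIVE}${prefix_native}/ice/arm-poky-linux-gnueabi-poky-arm-poky-linux-gnueabi-hard-@VERSION@-<hostname>.tar.gz
# where @VERSION@ is later replaced with the gcc version in set_icecc_env.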
def icc_path(bb,d):
if icc_is_kernel(bb, d):
return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
else:
prefix = bb.data.expand('${HOST_PREFIX}', d)
return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
def icc_get_tool(bb, d, tool):
if icc_is_native(bb, d):
return os.popen("which %s" % tool).read()[:-1]
elif icc_is_kernel(bb, d):
return os.popen("which %s" % get_cross_kernel_cc(bb, d)).read()[:-1]
else:
ice_dir = bb.data.expand('${STAGING_BINDIR_TOOLCHAIN}', d)
target_sys = bb.data.expand('${TARGET_SYS}', d)
return os.path.join(ice_dir, "%s-%s" % (target_sys, tool))
set_icecc_env() {
ICECC_VERSION="${@icc_version(bb, d)}"
if [ "x${ICECC_VERSION}" = "x" ]
then
return
fi
ICE_PATH="${@icc_path(bb, d)}"
if [ "x${ICE_PATH}" = "x" ]
then
return
fi
ICECC_CC="${@icc_get_tool(bb,d, "gcc")}"
ICECC_CXX="${@icc_get_tool(bb,d, "g++")}"
if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
then
return
fi
ICE_VERSION=`$ICECC_CC -dumpversion`
ICECC_VERSION=`echo ${ICECC_VERSION} | sed -e "s/@VERSION@/$ICE_VERSION/g"`
if [ ! -x "${ICECC_ENV_EXEC}" ]
then
return
fi
ICECC_AS="`${ICECC_CC} -print-prog-name=as`"
if [ "`dirname "${ICECC_AS}"`" = "." ]
then
ICECC_AS="`which as`"
fi
if [ ! -r "${ICECC_VERSION}" ]
then
mkdir -p "`dirname "${ICECC_VERSION}"`"
${ICECC_ENV_EXEC} "${ICECC_CC}" "${ICECC_CXX}" "${ICECC_AS}" "${ICECC_VERSION}"
fi
export ICECC_VERSION ICECC_CC ICECC_CXX
export PATH="$ICE_PATH:$PATH"
export CCACHE_PATH="$PATH"
}
do_configure_prepend() {
set_icecc_env
}
do_compile_prepend() {
set_icecc_env
}
do_compile_kernelmodules_prepend() {
set_icecc_env
}
#do_install_prepend() {
# set_icecc_env
#}

View File

View File

@@ -0,0 +1,14 @@
AUTO_SYSLINUXCFG = "1"
INITRD_IMAGE ?= "core-image-minimal-initramfs"
INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz"
APPEND += "root=/dev/ram0 "
TIMEOUT = "10"
LABELS += "boot install"
ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
do_bootimg[depends] += "${INITRD_IMAGE}:do_rootfs"
do_bootimg[depends] += "${IMAGE_BASENAME}:do_rootfs"
inherit bootimg

View File

@@ -0,0 +1,72 @@
do_rootfs[depends] += "mklibs-native:do_populate_sysroot"
IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
mklibs_optimize_image_doit() {
rm -rf ${WORKDIR}/mklibs
mkdir -p ${WORKDIR}/mklibs/dest
cd ${IMAGE_ROOTFS}
du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
for i in `find .`; do file $i; done \
| grep ELF \
| grep "LSB executable" \
| grep "dynamically linked" \
| sed "s/:.*//" \
| sed "s+^\./++" \
> ${WORKDIR}/mklibs/executables.list
case ${TARGET_ARCH} in
powerpc | mips | microblaze )
dynamic_loader="${base_libdir}/ld.so.1"
;;
powerpc64)
dynamic_loader="${base_libdir}/ld64.so.1"
;;
x86_64)
dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
;;
i586 )
dynamic_loader="${base_libdir}/ld-linux.so.2"
;;
arm )
dynamic_loader="${base_libdir}/ld-linux.so.3"
;;
* )
dynamic_loader="/unknown_dynamic_linker"
;;
esac
mklibs -v \
--ldlib ${dynamic_loader} \
--sysroot ${PKG_CONFIG_SYSROOT_DIR} \
--root ${IMAGE_ROOTFS} \
--target `echo ${TARGET_PREFIX} | sed 's/-$//' ` \
-d ${WORKDIR}/mklibs/dest \
`cat ${WORKDIR}/mklibs/executables.list`
cd ${WORKDIR}/mklibs/dest
for i in *
do
cp $i `find ${IMAGE_ROOTFS} -name $i`
done
cd ${IMAGE_ROOTFS}
du -bs > ${WORKDIR}/mklibs/du.after.mklibs.txt
echo rootfs size before mklibs optimization: `cat ${WORKDIR}/mklibs/du.before.mklibs.txt`
echo rootfs size after mklibs optimization: `cat ${WORKDIR}/mklibs/du.after.mklibs.txt`
}
mklibs_optimize_image() {
for img in ${MKLIBS_OPTIMIZED_IMAGES}
do
if [ "${img}" = "${PN}" ] || [ "${img}" = "all" ]
then
mklibs_optimize_image_doit
break
fi
done
}
EXPORT_FUNCTIONS mklibs_optimize_image
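# Illustrative usage (hypothetical image name): restrict the optimization to one
# image, or use "all" to cover every image:
#   MKLIBS_OPTIMIZED_IMAGES = "core-image-minimal"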

View File

@@ -0,0 +1,35 @@
do_rootfs[depends] += "prelink-native:do_populate_sysroot"
IMAGE_PREPROCESS_COMMAND += "prelink_image; "
prelink_image () {
# export PSEUDO_DEBUG=4
# /bin/env | /bin/grep PSEUDO
# echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH"
# echo "LD_PRELOAD=$LD_PRELOAD"
pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
echo "Size before prelinking $pre_prelink_size."
# We need a prelink conf on the filesystem, add one if it's missing
if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
cp ${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf \
${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
dummy_prelink_conf=true;
else
dummy_prelink_conf=false;
fi
# prelink!
${STAGING_DIR_NATIVE}${sbindir_native}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf
# Remove the prelink.conf if we had to add it.
if [ "$dummy_prelink_conf" = "true" ]; then
rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
fi
pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
echo "Size after prelinking $pre_prelink_size."
}
EXPORT_FUNCTIONS prelink_image

View File

@@ -0,0 +1,98 @@
HOST_DATA ?= "${TMPDIR}/host-contamination-data/"
SWABBER_REPORT ?= "${LOG_DIR}/swabber/"
SWABBER_LOGS ?= "${LOG_DIR}/contamination-logs"
TRACE_LOGDIR ?= "${SWABBER_LOGS}/${PACKAGE_ARCH}"
TRACE_LOGFILE = "${TRACE_LOGDIR}/${PN}-${PV}"
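# Illustrative expansion (values hypothetical): for PN = "busybox", PV = "1.19.4"
# and PACKAGE_ARCH = "armv5te", TRACE_LOGFILE becomes
# ${LOG_DIR}/contamination-logs/armv5te/busybox-1.19.4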
SWAB_ORIG_TASK := "${BB_DEFAULT_TASK}"
BB_DEFAULT_TASK = "generate_swabber_report"
# Several recipes don't build with parallel make when run under strace
# Ideally these should be fixed but as a temporary measure disable parallel
# builds for troublesome recipes
PARALLEL_MAKE_pn-openssl = ""
PARALLEL_MAKE_pn-eglibc = ""
PARALLEL_MAKE_pn-glib-2.0 = ""
PARALLEL_MAKE_pn-libxml2 = ""
PARALLEL_MAKE_pn-readline = ""
PARALLEL_MAKE_pn-util-linux = ""
PARALLEL_MAKE_pn-binutils = ""
PARALLEL_MAKE_pn-bison = ""
PARALLEL_MAKE_pn-cmake = ""
PARALLEL_MAKE_pn-elfutils = ""
PARALLEL_MAKE_pn-gcc = ""
PARALLEL_MAKE_pn-gcc-runtime = ""
PARALLEL_MAKE_pn-m4 = ""
PARALLEL_MAKE_pn-opkg = ""
PARALLEL_MAKE_pn-pkgconfig = ""
PARALLEL_MAKE_pn-prelink = ""
PARALLEL_MAKE_pn-qemugl = ""
PARALLEL_MAKE_pn-rpm = ""
PARALLEL_MAKE_pn-tcl = ""
PARALLEL_MAKE_pn-beecrypt = ""
PARALLEL_MAKE_pn-curl = ""
PARALLEL_MAKE_pn-gmp = ""
PARALLEL_MAKE_pn-libmpc = ""
PARALLEL_MAKE_pn-libxslt = ""
PARALLEL_MAKE_pn-lzo = ""
PARALLEL_MAKE_pn-popt = ""
PARALLEL_MAKE_pn-linux-wrs = ""
PARALLEL_MAKE_pn-libgcrypt = ""
PARALLEL_MAKE_pn-gpgme = ""
PARALLEL_MAKE_pn-udev = ""
PARALLEL_MAKE_pn-gnutls = ""
PARALLEL_MAKE_pn-sat-solver = ""
PARALLEL_MAKE_pn-libzypp = ""
PARALLEL_MAKE_pn-zypper = ""
python() {
# NOTE: It might be useful to detect host infection on native and cross
# packages, but as it turns out to be pretty hard to do this for all native
# and cross packages which aren't swabber-native or one of its dependencies,
# I have ignored them for now...
if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d):
deps = (d.getVarFlag('do_setscene', 'depends') or "").split()
deps.append('strace-native:do_populate_sysroot')
d.setVarFlag('do_setscene', 'depends', " ".join(deps))
logdir = bb.data.expand("${TRACE_LOGDIR}", d)
bb.utils.mkdirhier(logdir)
else:
d.setVar('STRACEFUNC', '')
}
STRACEPID = "${@os.getpid()}"
STRACEFUNC = "imageswab_attachstrace"
do_configure[prefuncs] += "${STRACEFUNC}"
do_compile[prefuncs] += "${STRACEFUNC}"
imageswab_attachstrace () {
STRACE=`which strace`
if [ -x "$STRACE" ]; then
swabber-strace-attach "$STRACE -f -o ${TRACE_LOGFILE}-${BB_CURRENTTASK}.log -e trace=open,execve -p ${STRACEPID}" "${TRACE_LOGFILE}-traceattach-${BB_CURRENTTASK}.log"
fi
}
do_generate_swabber_report () {
update_distro ${HOST_DATA}
# Swabber can't create the directory for us
mkdir -p ${SWABBER_REPORT}
REPORTSTAMP=${SWAB_ORIG_TASK}-`date +%2m%2d%2H%2M%Y`
if [ `which ccache` ] ; then
CCACHE_DIR=`( ccache -s | grep "cache directory" | grep -o '[^ ]*$' 2> /dev/null )`
fi
if [ "$(ls -A ${HOST_DATA})" ]; then
echo "Generating swabber report"
swabber -d ${HOST_DATA} -l ${SWABBER_LOGS} -o ${SWABBER_REPORT}/report-${REPORTSTAMP}.txt -r ${SWABBER_REPORT}/extra_report-${REPORTSTAMP}.txt -c all -p ${TOPDIR} -f ${OEROOT}/meta/conf/swabber ${TOPDIR} ${OEROOT} ${CCACHE_DIR}
else
echo "No host data, cannot generate swabber report."
fi
}
addtask generate_swabber_report after do_${SWAB_ORIG_TASK}
do_generate_swabber_report[depends] = "swabber-native:do_populate_sysroot"

View File

@@ -0,0 +1,375 @@
inherit rootfs_${IMAGE_PKGTYPE}
IMAGETEST ?= "dummy"
inherit imagetest-${IMAGETEST}
LICENSE = "MIT"
PACKAGES = ""
RDEPENDS += "${IMAGE_INSTALL} ${LINGUAS_INSTALL} ${NORMAL_FEATURE_INSTALL}"
RRECOMMENDS += "${NORMAL_FEATURE_INSTALL_OPTIONAL}"
INHIBIT_DEFAULT_DEPS = "1"
# IMAGE_FEATURES may contain any available package group
IMAGE_FEATURES ?= ""
IMAGE_FEATURES[type] = "list"
# packages to install from features
FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
# packages to install from features, excluding dev/dbg/doc
NORMAL_FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(normal_groups(d), d))}"
NORMAL_FEATURE_INSTALL_OPTIONAL = "${@' '.join(oe.packagegroup.optional_packages(normal_groups(d), d))}"
def normal_groups(d):
"""Return all the IMAGE_FEATURES, with the exception of our special package groups"""
extras = set(['dev-pkgs', 'doc-pkgs', 'dbg-pkgs'])
features = set(oe.data.typed_value('IMAGE_FEATURES', d))
return features.difference(extras)
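# Worked example (illustrative): with IMAGE_FEATURES = "splash dev-pkgs",
# normal_groups(d) returns set(['splash']); dev-pkgs is handled separately via
# PACKAGE_GROUP_dev-pkgs below.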
def normal_pkgs_to_install(d):
import oe.packagedata
to_install = oe.data.typed_value('IMAGE_INSTALL', d)
features = normal_groups(d)
required = list(oe.packagegroup.required_packages(features, d))
optional = list(oe.packagegroup.optional_packages(features, d))
all_packages = to_install + required + optional
recipes = filter(None, [oe.packagedata.recipename(pkg, d) for pkg in all_packages])
return all_packages + recipes
PACKAGE_GROUP_dbg-pkgs = "${@' '.join('%s-dbg' % pkg for pkg in normal_pkgs_to_install(d))}"
PACKAGE_GROUP_dbg-pkgs[optional] = "1"
PACKAGE_GROUP_dev-pkgs = "${@' '.join('%s-dev' % pkg for pkg in normal_pkgs_to_install(d))}"
PACKAGE_GROUP_dev-pkgs[optional] = "1"
PACKAGE_GROUP_doc-pkgs = "${@' '.join('%s-doc' % pkg for pkg in normal_pkgs_to_install(d))}"
PACKAGE_GROUP_doc-pkgs[optional] = "1"
# "export IMAGE_BASENAME" not supported at this time
IMAGE_INSTALL ?= ""
IMAGE_INSTALL[type] = "list"
IMAGE_BASENAME[export] = "1"
export PACKAGE_INSTALL ?= "${IMAGE_INSTALL} ${FEATURE_INSTALL}"
PACKAGE_INSTALL_ATTEMPTONLY ?= "${FEATURE_INSTALL_OPTIONAL}"
# Images are generally built explicitly, do not need to be part of world.
EXCLUDE_FROM_WORLD = "1"
USE_DEVFS ?= "0"
PID = "${@os.getpid()}"
PACKAGE_ARCH = "${MACHINE_ARCH}"
LDCONFIGDEPEND ?= "ldconfig-native:do_populate_sysroot"
LDCONFIGDEPEND_libc-uclibc = ""
do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}"
do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot"
IMAGE_TYPE = ${@base_contains("IMAGE_FSTYPES", "live", "live", "empty", d)}
inherit image-${IMAGE_TYPE}
python () {
deps = d.getVarFlag('do_rootfs', 'depends') or ""
for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
for dep in ((d.getVar('IMAGE_DEPENDS_%s' % type) or "").split() or []):
deps += " %s:do_populate_sysroot" % dep
for dep in (d.getVar('EXTRA_IMAGEDEPENDS', True) or "").split():
deps += " %s:do_populate_sysroot" % dep
d.setVarFlag('do_rootfs', 'depends', deps)
# If we don't do this, we try to run the mapping hooks while parsing, which is slow
# bitbake should really provide something to let us know this...
if d.getVar('BB_WORKERCONTEXT', True) is not None:
runtime_mapping_rename("PACKAGE_INSTALL", d)
runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", d)
}
#
# Get a list of files containing device tables to create.
# * IMAGE_DEVICE_TABLE is the old name for an absolute path to a device table file
# * IMAGE_DEVICE_TABLES is a new name for a file, or list of files, searched
# for in the BBPATH
# If neither is specified then the default name of files/device_table-minimal.txt
# is searched for in the BBPATH (same as the old version.)
#
def get_devtable_list(d):
devtable = d.getVar('IMAGE_DEVICE_TABLE', 1)
if devtable != None:
return devtable
str = ""
devtables = d.getVar('IMAGE_DEVICE_TABLES', 1)
if devtables == None:
devtables = 'files/device_table-minimal.txt'
for devtable in devtables.split():
str += " %s" % bb.which(d.getVar('BBPATH', 1), devtable)
return str
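# Illustrative usage (second file name hypothetical): search two device tables in BBPATH:
#   IMAGE_DEVICE_TABLES = "files/device_table-minimal.txt files/device_table-extra.txt"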
IMAGE_CLASSES ?= "image_types"
inherit ${IMAGE_CLASSES}
IMAGE_POSTPROCESS_COMMAND ?= ""
MACHINE_POSTPROCESS_COMMAND ?= ""
ROOTFS_POSTPROCESS_COMMAND ?= ""
# some default locales
IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
LINGUAS_INSTALL ?= "${@" ".join(map(lambda s: "locale-base-%s" % s, d.getVar('IMAGE_LINGUAS', 1).split()))}"
PSEUDO_PASSWD = "${IMAGE_ROOTFS}"
do_rootfs[nostamp] = "1"
do_rootfs[dirs] = "${TOPDIR}"
do_rootfs[lockfiles] += "${IMAGE_ROOTFS}.lock"
do_build[nostamp] = "1"
# Must call real_do_rootfs() from inside here, rather than as a separate
# task, so that we have a single fakeroot context for the whole process.
do_rootfs[umask] = 022
fakeroot do_rootfs () {
#set -x
# When using rpm incremental image generation, don't remove the rootfs
if [ "${INC_RPM_IMAGE_GEN}" != "1" -o "${IMAGE_PKGTYPE}" != "rpm" ]; then
rm -rf ${IMAGE_ROOTFS}
fi
rm -rf ${MULTILIB_TEMP_ROOTFS}
mkdir -p ${IMAGE_ROOTFS}
mkdir -p ${DEPLOY_DIR_IMAGE}
cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOY_DIR_IMAGE}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt
# If "${IMAGE_ROOTFS}/dev" exists, then the device had been made by
# the previous build
if [ "${USE_DEVFS}" != "1" -a ! -r "${IMAGE_ROOTFS}/dev" ]; then
for devtable in ${@get_devtable_list(d)}; do
# Always return true since the nodes may already exist when using
# incremental image generation
makedevs -r ${IMAGE_ROOTFS} -D $devtable
done
fi
rootfs_${IMAGE_PKGTYPE}_do_rootfs
insert_feed_uris
if [ "x${LDCONFIGDEPEND}" != "x" ]; then
# Run ldconfig on the image to create a valid cache
# (new format for cross arch compatibility)
echo executing: ldconfig -r ${IMAGE_ROOTFS} -c new -v
ldconfig -r ${IMAGE_ROOTFS} -c new -v
fi
# (re)create kernel modules dependencies
# This part is done by kernel-module-* postinstall scripts, but if the image does
# not contain any modules at all there are a few moments in the boot sequence with
# "unable to open modules.dep" messages.
if [ -e ${STAGING_KERNEL_DIR}/kernel-abiversion ]; then
KERNEL_VERSION=`cat ${STAGING_KERNEL_DIR}/kernel-abiversion`
mkdir -p ${IMAGE_ROOTFS}/lib/modules/$KERNEL_VERSION
${TARGET_PREFIX}depmod -a -b ${IMAGE_ROOTFS} -F ${STAGING_KERNEL_DIR}/System.map-$KERNEL_VERSION $KERNEL_VERSION
fi
${IMAGE_PREPROCESS_COMMAND}
${@get_imagecmds(d)}
${IMAGE_POSTPROCESS_COMMAND}
${MACHINE_POSTPROCESS_COMMAND}
}
insert_feed_uris () {
echo "Building feeds for [${DISTRO}].."
for line in ${FEED_URIS}
do
# strip leading and trailing spaces/tabs, then split into name and uri
line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
echo "Added $feed_name feed with URL $feed_uri"
# insert new feed-sources
echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
done
}
log_check() {
for target in $*
do
lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
echo "log_check: Using $lf_path as logfile"
if test -e "$lf_path"
then
${IMAGE_PKGTYPE}_log_check $target $lf_path
else
echo "Cannot find logfile [$lf_path]"
fi
echo "Logfile is clean"
done
}
MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|"
MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
multilib_generate_python_file() {
cat >${MULTILIB_CHECK_FILE} <<EOF
import sys, os, os.path
import re,filecmp
allow_rep=re.compile(re.sub("\|$","","${MULTILIBRE_ALLOW_REP}"))
error_promt="Multilib check error:"
files={}
dirs=raw_input()
for dir in dirs.split():
for root, subfolers, subfiles in os.walk(dir):
for file in subfiles:
item=os.path.join(root,file)
key=str(os.path.join("/",os.path.relpath(item,dir)))
valid=True;
if files.has_key(key):
#check whether the file is allow to replace
if allow_rep.match(key):
valid=True
else:
if not filecmp.cmp(files[key],item):
valid=False
print("%s duplicate files %s %s is not the same\n" % (error_promt, item, files[key]))
sys.exit(1)
#pass the check, add to list
if valid:
files[key]=item
EOF
}
multilib_sanity_check() {
multilib_generate_python_file
echo $@ | python ${MULTILIB_CHECK_FILE}
}
get_split_linguas() {
for translation in ${IMAGE_LINGUAS}; do
translation_split=$(echo ${translation} | awk -F '-' '{print $1}')
echo ${translation}
echo ${translation_split}
done | sort | uniq
}
rootfs_install_all_locales() {
# Generate list of installed packages for which additional locale packages might be available
INSTALLED_PACKAGES=`list_installed_packages | egrep -v -- "(-locale-|^locale-base-|-dev$|-doc$|^kernel|^glibc|^ttf|^task|^perl|^python)"`
# Generate a list of locale packages that exist
SPLIT_LINGUAS=`get_split_linguas`
PACKAGES_TO_INSTALL=""
for lang in $SPLIT_LINGUAS; do
for pkg in $INSTALLED_PACKAGES; do
existing_pkg=`rootfs_check_package_exists $pkg-locale-$lang`
if [ "$existing_pkg" != "" ]; then
PACKAGES_TO_INSTALL="$PACKAGES_TO_INSTALL $existing_pkg"
fi
done
done
# Install the packages, if any
if [ "$PACKAGES_TO_INSTALL" != "" ]; then
rootfs_install_packages $PACKAGES_TO_INSTALL
fi
# Workaround for broken shell function dependencies
if false ; then
get_split_linguas
list_installed_packages
rootfs_check_package_exists
fi
}
# set '*' as the root password so the images
# can decide if they want it or not
zap_root_password () {
sed 's%^root:[^:]*:%root:*:%' < ${IMAGE_ROOTFS}/etc/passwd >${IMAGE_ROOTFS}/etc/passwd.new
mv ${IMAGE_ROOTFS}/etc/passwd.new ${IMAGE_ROOTFS}/etc/passwd
}
# Turn any symbolic /sbin/init link into a file
remove_init_link () {
if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
rm ${IMAGE_ROOTFS}/sbin/init
cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
fi
}
make_zimage_symlink_relative () {
if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
(cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
fi
}
write_image_manifest () {
rootfs_${IMAGE_PKGTYPE}_write_manifest
rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.manifest
ln -s ${IMAGE_NAME}.rootfs.manifest ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.manifest
}
# Make login manager(s) enable automatic login.
# Useful for devices where we do not want to log in at all (e.g. phones)
set_image_autologin () {
sed -i 's%^AUTOLOGIN=\"false"%AUTOLOGIN="true"%g' ${IMAGE_ROOTFS}/etc/sysconfig/gpelogin
}
# Can be used to create /etc/timestamp during image construction to give a reasonably
# sane default time setting
rootfs_update_timestamp () {
date -u +%2m%2d%2H%2M%4Y >${IMAGE_ROOTFS}/etc/timestamp
}
# Prevent X from being started
rootfs_no_x_startup () {
if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
fi
}
rootfs_trim_schemas () {
for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
do
# Need this in case no files exist
if [ -e $schema ]; then
oe-trim-schemas $schema > $schema.new
mv $schema.new $schema
fi
done
}
EXPORT_FUNCTIONS zap_root_password remove_init_link do_rootfs make_zimage_symlink_relative set_image_autologin rootfs_update_timestamp rootfs_no_x_startup
do_fetch[noexec] = "1"
do_unpack[noexec] = "1"
do_patch[noexec] = "1"
do_configure[noexec] = "1"
do_compile[noexec] = "1"
do_install[noexec] = "1"
do_populate_sysroot[noexec] = "1"
do_package[noexec] = "1"
do_package_write_ipk[noexec] = "1"
do_package_write_deb[noexec] = "1"
do_package_write_rpm[noexec] = "1"
addtask rootfs before do_build

View File

@@ -0,0 +1,174 @@
def get_imagecmds(d):
cmds = "\n"
old_overrides = d.getVar('OVERRIDES', 0)
types = d.getVar('IMAGE_FSTYPES', True).split()
# Live images are processed via an inherited bbclass and
# do not get processed here.
# Live images also depend on ext3, so ensure it is present
if "live" in types:
if "ext3" not in types:
types.append("ext3")
types.remove("live")
for type in types:
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
bb.data.update_data(localdata)
localdata.setVar('type', type)
cmd = localdata.getVar("IMAGE_CMD", True)
localdata.setVar('cmd', cmd)
cmds += localdata.getVar("runimagecmd", True)
return cmds
runimagecmd () {
# Image generation code for image type ${type}
ROOTFS_SIZE=`du -ks ${IMAGE_ROOTFS}|awk '{base_size = ($1 * ${IMAGE_OVERHEAD_FACTOR}); OFMT = "%.0f" ; print ((base_size > ${IMAGE_ROOTFS_SIZE} ? base_size : ${IMAGE_ROOTFS_SIZE}) + ${IMAGE_ROOTFS_EXTRA_SPACE}) }'`
${cmd}
cd ${DEPLOY_DIR_IMAGE}/
rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.${type}
ln -s ${IMAGE_NAME}.rootfs.${type} ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.${type}
}
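# Worked example for the ROOTFS_SIZE computation above (illustrative numbers):
# with `du -ks` reporting 100000 KiB, IMAGE_OVERHEAD_FACTOR = 1.3,
# IMAGE_ROOTFS_SIZE = 65536 and IMAGE_ROOTFS_EXTRA_SPACE = 0, the result is
# max(100000 * 1.3, 65536) + 0 = 130000 KiB.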
XZ_COMPRESSION_LEVEL ?= "-e -9"
XZ_INTEGRITY_CHECK ?= "crc32"
IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 -n ${EXTRA_IMAGECMD}"
IMAGE_CMD_sum.jffs2 = "${IMAGE_CMD_jffs2} && sumtool -i ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 \
-o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.sum.jffs2 -n ${EXTRA_IMAGECMD}"
IMAGE_CMD_cramfs = "mkcramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cramfs ${EXTRA_IMAGECMD}"
IMAGE_CMD_ext2 () {
rm -rf ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN} && mkdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
genext2fs -b $ROOTFS_SIZE -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext2
mv ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext2 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext2
rmdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
}
IMAGE_CMD_ext2.gz () {
rm -rf ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN} && mkdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
genext2fs -b $ROOTFS_SIZE -i 4096 -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext2
gzip -f -9 ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext2
mv ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext2.gz ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext2.gz
rmdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
}
IMAGE_CMD_ext2.bz2 () {
rm -rf ${DEPLOY_DIR_IMAGE}/tmp.gz && mkdir ${DEPLOY_DIR_IMAGE}/tmp.gz
genext2fs -b $ROOTFS_SIZE -i 4096 -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} ${DEPLOY_DIR_IMAGE}/tmp.gz/${IMAGE_NAME}.rootfs.ext2
bzip2 -f -9 ${DEPLOY_DIR_IMAGE}/tmp.gz/${IMAGE_NAME}.rootfs.ext2
mv ${DEPLOY_DIR_IMAGE}/tmp.gz/${IMAGE_NAME}.rootfs.ext2.bz2 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext2.bz2
rmdir ${DEPLOY_DIR_IMAGE}/tmp.gz
}
IMAGE_CMD_ext2.lzma () {
rm -rf ${DEPLOY_DIR_IMAGE}/tmp.gz && mkdir ${DEPLOY_DIR_IMAGE}/tmp.gz
genext2fs -b $ROOTFS_SIZE -i 4096 -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} ${DEPLOY_DIR_IMAGE}/tmp.gz/${IMAGE_NAME}.rootfs.ext2
lzma -f -7 ${DEPLOY_DIR_IMAGE}/tmp.gz/${IMAGE_NAME}.rootfs.ext2
mv ${DEPLOY_DIR_IMAGE}/tmp.gz/${IMAGE_NAME}.rootfs.ext2.lzma ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext2.lzma
rmdir ${DEPLOY_DIR_IMAGE}/tmp.gz
}
IMAGE_CMD_ext3 () {
genext2fs -b $ROOTFS_SIZE -i 4096 -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext3
tune2fs -j ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext3
}
IMAGE_CMD_ext3.gz () {
rm -rf ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN} && mkdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
genext2fs -b $ROOTFS_SIZE -i 4096 -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext3
tune2fs -j ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext3
gzip -f -9 ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext3
mv ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext3.gz ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext3.gz
rmdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
}
oe_mkext4fs () {
genext2fs -b $ROOTFS_SIZE -i 4096 -d ${IMAGE_ROOTFS} ${EXTRA_IMAGECMD} $1
tune2fs -O extents,uninit_bg,dir_index,has_journal $1
e2fsck -yfDC0 $1 || chk=$?
case $chk in
0|1|2)
;;
*)
return $chk
;;
esac
}
IMAGE_CMD_ext4 () {
oe_mkext4fs ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext4
}
IMAGE_CMD_ext4.gz () {
rm -rf ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN} && mkdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
oe_mkext4fs ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext4
gzip -f -9 ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext4
mv ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}/${IMAGE_NAME}.rootfs.ext4.gz ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ext4.gz
rmdir ${DEPLOY_DIR_IMAGE}/tmp.gz-${PN}
}
IMAGE_CMD_btrfs () {
mkfs.btrfs -b `expr ${ROOTFS_SIZE} \* 1024` ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
}
IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend"
IMAGE_CMD_squashfs-lzma = "mksquashfs-lzma ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-lzma ${EXTRA_IMAGECMD} -noappend"
IMAGE_CMD_tar = "cd ${IMAGE_ROOTFS} && tar -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar ."
IMAGE_CMD_tar.gz = "cd ${IMAGE_ROOTFS} && tar -zcvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar.gz ."
IMAGE_CMD_tar.bz2 = "cd ${IMAGE_ROOTFS} && tar -jcvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar.bz2 ."
IMAGE_CMD_tar.xz = "cd ${IMAGE_ROOTFS} && tar --xz -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar.xz ."
IMAGE_CMD_cpio () {
touch ${IMAGE_ROOTFS}/init
cd ${IMAGE_ROOTFS} && (find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
}
IMAGE_CMD_cpio.gz () {
touch ${IMAGE_ROOTFS}/init
cd ${IMAGE_ROOTFS} && (find . | cpio -o -H newc | gzip -c -9 >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio.gz)
}
IMAGE_CMD_cpio.xz = "type cpio >/dev/null; cd ${IMAGE_ROOTFS} && (find . | cpio -o -H newc | xz -c ${XZ_COMPRESSION_LEVEL} --check=${XZ_INTEGRITY_CHECK} > ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio.xz) ${EXTRA_IMAGECMD}"
IMAGE_CMD_cpio.lzma = "type cpio >/dev/null; cd ${IMAGE_ROOTFS} && (find . | cpio -o -H newc | xz --format=lzma -c ${XZ_COMPRESSION_LEVEL} --check=${XZ_INTEGRITY_CHECK} >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio.lzma) ${EXTRA_IMAGECMD}"
UBI_VOLNAME ?= "${MACHINE}-rootfs"
IMAGE_CMD_ubi () {
echo \[ubifs\] > ubinize.cfg
echo mode=ubi >> ubinize.cfg
echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs >> ubinize.cfg
echo vol_id=0 >> ubinize.cfg
echo vol_type=dynamic >> ubinize.cfg
echo vol_name=${UBI_VOLNAME} >> ubinize.cfg
echo vol_flags=autoresize >> ubinize.cfg
mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS} && ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubi ${UBINIZE_ARGS} ubinize.cfg
}
IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}"
EXTRA_IMAGECMD = ""
EXTRA_IMAGECMD_jffs2 ?= "--pad --little-endian --eraseblock=0x40000"
# Change these if you want default genext2fs behavior (i.e. create the minimal number of inodes)
EXTRA_IMAGECMD_ext2 ?= "-i 8192"
EXTRA_IMAGECMD_ext2.gz ?= "-i 8192"
EXTRA_IMAGECMD_ext3 ?= "-i 8192"
EXTRA_IMAGECMD_ext3.gz ?= "-i 8192"
EXTRA_IMAGECMD_btrfs ?= ""
IMAGE_DEPENDS = ""
IMAGE_DEPENDS_jffs2 = "mtd-utils-native"
IMAGE_DEPENDS_sum.jffs2 = "mtd-utils-native"
IMAGE_DEPENDS_cramfs = "cramfs-native"
IMAGE_DEPENDS_ext2 = "genext2fs-native"
IMAGE_DEPENDS_ext2.gz = "genext2fs-native"
IMAGE_DEPENDS_ext2.bz2 = "genext2fs-native"
IMAGE_DEPENDS_ext2.lzma = "genext2fs-native xz-native"
IMAGE_DEPENDS_ext3 = "genext2fs-native e2fsprogs-native"
IMAGE_DEPENDS_ext3.gz = "genext2fs-native e2fsprogs-native"
IMAGE_DEPENDS_ext4 = "genext2fs-native e2fsprogs-native"
IMAGE_DEPENDS_ext4.gz = "genext2fs-native e2fsprogs-native"
IMAGE_DEPENDS_btrfs = "btrfs-tools-native"
IMAGE_DEPENDS_squashfs = "squashfs-tools-native"
IMAGE_DEPENDS_squashfs-lzma = "squashfs-lzma-tools-native"
IMAGE_DEPENDS_tar.xz = "tar-native xz-native"
IMAGE_DEPENDS_cpio.lzma = "xz-native"
IMAGE_DEPENDS_cpio.xz = "xz-native"
IMAGE_DEPENDS_ubi = "mtd-utils-native"
IMAGE_DEPENDS_ubifs = "mtd-utils-native"
# This variable lists the values that are suitable for IMAGE_FSTYPES
IMAGE_TYPES = "jffs2 sum.jffs2 cramfs ext2 ext2.gz ext2.bz2 ext3 ext3.gz ext2.lzma btrfs live squashfs squashfs-lzma ubi tar tar.gz tar.bz2 tar.xz cpio cpio.gz cpio.xz cpio.lzma"

View File

@@ -0,0 +1,32 @@
inherit image_types kernel-arch
oe_mkimage () {
mkimage -A ${UBOOT_ARCH} -O linux -T ramdisk -C $2 -n ${IMAGE_NAME} \
-d ${DEPLOY_DIR_IMAGE}/$1 ${DEPLOY_DIR_IMAGE}/$1.u-boot
}
IMAGE_DEPENDS_ext2.u-boot = "genext2fs-native e2fsprogs-native u-boot-mkimage-native"
IMAGE_CMD_ext2.u-boot = "${IMAGE_CMD_ext2} \
oe_mkimage ${IMAGE_NAME}.rootfs.ext2 none"
IMAGE_DEPENDS_ext2.gz.u-boot = "genext2fs-native e2fsprogs-native u-boot-mkimage-native"
IMAGE_CMD_ext2.gz.u-boot = "${IMAGE_CMD_ext2.gz} \
oe_mkimage ${IMAGE_NAME}.rootfs.ext2.gz gzip"
IMAGE_DEPENDS_ext2.bz2.u-boot = "genext2fs-native e2fsprogs-native u-boot-mkimage-native"
IMAGE_CMD_ext2.bz2.u-boot = "${IMAGE_CMD_ext2.bz2} \
oe_mkimage ${IMAGE_NAME}.rootfs.ext2.bz2 bzip2"
IMAGE_DEPENDS_ext2.lzma.u-boot = "genext2fs-native e2fsprogs-native u-boot-mkimage-native"
IMAGE_CMD_ext2.lzma.u-boot = "${IMAGE_CMD_ext2.lzma} \
oe_mkimage ${IMAGE_NAME}.rootfs.ext2.lzma lzma"
IMAGE_DEPENDS_ext3.gz.u-boot = "genext2fs-native e2fsprogs-native u-boot-mkimage-native"
IMAGE_CMD_ext3.gz.u-boot = "${IMAGE_CMD_ext3.gz} \
oe_mkimage ${IMAGE_NAME}.rootfs.ext3.gz gzip"
IMAGE_DEPENDS_ext4.gz.u-boot = "genext2fs-native e2fsprogs-native u-boot-mkimage-native"
IMAGE_CMD_ext4.gz.u-boot = "${IMAGE_CMD_ext4.gz} \
oe_mkimage ${IMAGE_NAME}.rootfs.ext4.gz gzip"
IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot"

View File

@@ -0,0 +1 @@
# dummy testclass file

View File

@@ -0,0 +1,223 @@
# Test related variables
# By default, TEST_DIR is created under WORKDIR
TEST_DIR ?= "${WORKDIR}/qemuimagetest"
TEST_LOG ?= "${LOG_DIR}/qemuimagetests"
TEST_RESULT ?= "${TEST_DIR}/result"
TEST_TMP ?= "${TEST_DIR}/tmp"
TEST_SCEN ?= "sanity"
TEST_STATUS ?= "${TEST_TMP}/status"
TARGET_IPSAVE ?= "${TEST_TMP}/target_ip"
TEST_SERIALIZE ?= "1"
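# Illustrative setting (case name hypothetical): run a single case instead of a
# whole scenario by using the "scenario:case" form matched by casestr below:
#   TEST_SCEN = "sanity:boot"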
python do_qemuimagetest() {
qemuimagetest_main(d)
}
addtask qemuimagetest before do_build after do_rootfs
do_qemuimagetest[nostamp] = "1"
do_qemuimagetest[depends] += "qemu-native:do_populate_sysroot"
python do_qemuimagetest_standalone() {
qemuimagetest_main(d)
}
addtask qemuimagetest_standalone
do_qemuimagetest_standalone[nostamp] = "1"
do_qemuimagetest_standalone[depends] += "qemu-native:do_populate_sysroot"
def qemuimagetest_main(d):
import sys
import re
import os
import shutil
"""
Test Controller for automated testing.
"""
casestr = re.compile(r'(?P<scen>\w+\b):(?P<case>\S+$)')
resultstr = re.compile(r'\s*(?P<case>\w+)\s*(?P<pass>\d+)\s*(?P<fail>\d+)\s*(?P<noresult>\d+)')
machine = d.getVar('MACHINE', 1)
pname = d.getVar('PN', 1)
"""function to save test cases running status"""
def teststatus(test, status, index, length):
test_status = d.getVar('TEST_STATUS', 1)
if not os.path.exists(test_status):
raise bb.build.FuncFailed("No test status file existing under TEST_TMP")
f = open(test_status, "w")
f.write("\t%-15s%-15s%-15s%-15s\n" % ("Case", "Status", "Number", "Total"))
f.write("\t%-15s%-15s%-15s%-15s\n" % (case, status, index, length))
f.close()
"""funtion to run each case under scenario"""
def runtest(scen, case, fulltestpath):
resultpath = d.getVar('TEST_RESULT', 1)
tmppath = d.getVar('TEST_TMP', 1)
"""initialize log file for testcase"""
logpath = d.getVar('TEST_LOG', 1)
bb.utils.mkdirhier("%s/%s" % (logpath, scen))
caselog = os.path.join(logpath, "%s/log_%s.%s" % (scen, case, d.getVar('DATETIME', 1)))
os.system("touch %s" % caselog)
"""export TEST_TMP, TEST_RESULT, DEPLOY_DIR and QEMUARCH"""
os.environ["PATH"] = d.getVar("PATH", True)
os.environ["TEST_TMP"] = tmppath
os.environ["TEST_RESULT"] = resultpath
os.environ["DEPLOY_DIR"] = d.getVar("DEPLOY_DIR", True)
os.environ["QEMUARCH"] = machine
os.environ["QEMUTARGET"] = pname
os.environ["DISPLAY"] = d.getVar("DISPLAY", True)
os.environ["COREBASE"] = d.getVar("COREBASE", True)
os.environ["TOPDIR"] = d.getVar("TOPDIR", True)
os.environ["OE_TMPDIR"] = d.getVar("TMPDIR", True)
os.environ["TEST_STATUS"] = d.getVar("TEST_STATUS", True)
os.environ["TARGET_IPSAVE"] = d.getVar("TARGET_IPSAVE", True)
os.environ["TEST_SERIALIZE"] = d.getVar("TEST_SERIALIZE", True)
os.environ["SDK_NAME"] = d.getVar("SDK_NAME", True)
"""run Test Case"""
bb.note("Run %s test in scenario %s" % (case, scen))
os.system("%s" % fulltestpath)
"""function to check testcase list and remove inappropriate cases"""
def check_list(list):
final_list = []
for test in list:
(scen, case, fullpath) = test
"""Skip rpm/zypper if package_rpm not set for PACKAGE_CLASSES"""
if case.find("zypper") != -1 or case.find("rpm") != -1:
if d.getVar("PACKAGE_CLASSES", True).find("rpm", 0, 11) == -1:
bb.note("skip rpm/zypper cases since package_rpm not set in PACKAGE_CLASSES")
continue
else:
final_list.append((scen, case, fullpath))
else:
final_list.append((scen, case, fullpath))
if not final_list:
raise bb.build.FuncFailed("There is no suitable testcase for this target")
return final_list
"""Generate testcase list in runtime"""
def generate_list(testlist):
list = []
final_list = []
if len(testlist) == 0:
raise bb.build.FuncFailed("No testcase defined in TEST_SCEN")
"""check testcase folder and add case list according to TEST_SCEN"""
for item in testlist.split(" "):
n = casestr.match(item)
if n:
item = n.group('scen')
casefile = n.group('case')
for dir in d.getVar("QEMUIMAGETESTS", True).split():
fulltestcase = os.path.join(dir, item, casefile)
if not os.path.isfile(fulltestcase):
raise bb.build.FuncFailed("Testcase %s not found" % fulltestcase)
list.append((item, casefile, fulltestcase))
else:
for dir in d.getVar("QEMUIMAGETESTS", True).split():
scenlist = os.path.join(dir, "scenario", machine, pname)
if not os.path.isfile(scenlist):
raise bb.build.FuncFailed("No scenario list file named %s found" % scenlist)
f = open(scenlist, "r")
for line in f:
if item != line.split()[0]:
continue
else:
casefile = line.split()[1]
fulltestcase = os.path.join(dir, item, casefile)
if not os.path.isfile(fulltestcase):
raise bb.build.FuncFailed("Testcase %s not found" % fulltestcase)
list.append((item, casefile, fulltestcase))
final_list = check_list(list)
return final_list
"""Clean tmp folder for testing"""
def clean_tmp():
tmppath = d.getVar('TEST_TMP', 1)
if os.path.isdir(tmppath):
for f in os.listdir(tmppath):
tmpfile = os.path.join(tmppath, f)
if os.path.isfile(tmpfile):
os.remove(tmpfile)
elif os.path.isdir(tmpfile):
shutil.rmtree(tmpfile, True)
"""Before running testing, clean temp folder first"""
clean_tmp()
"""check testcase folder and create test log folder"""
testpath = d.getVar('TEST_DIR', 1)
bb.utils.mkdirhier(testpath)
logpath = d.getVar('TEST_LOG', 1)
bb.utils.mkdirhier(logpath)
tmppath = d.getVar('TEST_TMP', 1)
bb.utils.mkdirhier(tmppath)
"""initialize test status file"""
test_status = d.getVar('TEST_STATUS', 1)
if os.path.exists(test_status):
os.remove(test_status)
os.system("touch %s" % test_status)
"""initialize result file"""
resultpath = d.getVar('TEST_RESULT', 1)
bb.utils.mkdirhier(resultpath)
resultfile = os.path.join(resultpath, "testresult.%s" % d.getVar('DATETIME', 1))
sresultfile = os.path.join(resultpath, "testresult.log")
machine = d.getVar('MACHINE', 1)
if os.path.exists(sresultfile):
os.remove(sresultfile)
os.system("touch %s" % resultfile)
os.symlink(resultfile, sresultfile)
f = open(sresultfile, "a")
f.write("\tTest Result for %s %s\n" % (machine, pname))
f.write("\t%-15s%-15s%-15s%-15s\n" % ("Testcase", "PASS", "FAIL", "NORESULT"))
f.close()
"""generate pre-defined testcase list"""
testlist = d.getVar('TEST_SCEN', 1)
fulllist = generate_list(testlist)
"""Begin testing"""
for index,test in enumerate(fulllist):
(scen, case, fullpath) = test
teststatus(case, "running", index, (len(fulllist) - 1))
runtest(scen, case, fullpath)
teststatus(case, "finished", index, (len(fulllist) - 1))
"""Print Test Result"""
ret = 0
f = open(sresultfile, "r")
for line in f:
m = resultstr.match(line)
if m:
if m.group('fail') == "1":
ret = 1
elif m.group('noresult') == "1":
ret = 2
line = line.strip('\n')
bb.note(line)
else:
line = line.strip('\n')
bb.note(line)
f.close()
"""Clean temp files for testing"""
clean_tmp()
if ret != 0:
raise bb.build.FuncFailed("Some testcases fail, pls. check test result and test log!!!")

View File

@@ -0,0 +1,711 @@
# BB Class inspired by ebuild.sh
#
# This class will test files after installation for certain
# security issues and other kinds of issues.
#
# Checks we do:
# -Check the ownership and permissions
# -Check the RUNTIME path for the $TMPDIR
# -Check if .la files wrongly point to workdir
# -Check if .pc files wrongly point to workdir
# -Check if packages contain .debug directories or .so files
# where they should be in -dev or -dbg
# -Check if config.log contains traces to broken autoconf tests
# -Ensure that binaries in base_[bindir|sbindir|libdir] do not link
# into exec_prefix
# -Check that scripts in base_[bindir|sbindir|libdir] do not reference
# files under exec_prefix
#
# We need to have the scanelf utility as soon as
# possible, and it is contained within pax-utils-native.
# The package.bbclass can help us here.
#
inherit package
PACKAGE_DEPENDS += "pax-utils-native desktop-file-utils-native ${QADEPENDS}"
PACKAGEFUNCS += " do_package_qa "
# unsafe-references-in-binaries requires prelink-rtld from
# prelink-native, but we don't want this DEPENDS for -native builds
QADEPENDS = "prelink-native"
QADEPENDS_virtclass-native = ""
QADEPENDS_virtclass-nativesdk = ""
#
# dictionary for elf headers
#
# feel free to add and correct.
#
# TARGET_OS TARGET_ARCH MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
def package_qa_get_machine_dict():
return {
"darwin9" : {
"arm" : (40, 0, 0, True, 32),
},
"linux" : {
"arm" : (40, 97, 0, True, 32),
"armeb": (40, 97, 0, False, 32),
"powerpc": (20, 0, 0, False, 32),
"powerpc64": (21, 0, 0, False, 64),
"i386": ( 3, 0, 0, True, 32),
"i486": ( 3, 0, 0, True, 32),
"i586": ( 3, 0, 0, True, 32),
"i686": ( 3, 0, 0, True, 32),
"x86_64": (62, 0, 0, True, 64),
"ia64": (50, 0, 0, True, 64),
"alpha": (36902, 0, 0, True, 64),
"hppa": (15, 3, 0, False, 32),
"m68k": ( 4, 0, 0, False, 32),
"mips": ( 8, 0, 0, False, 32),
"mipsel": ( 8, 0, 0, True, 32),
"s390": (22, 0, 0, False, 32),
"sh4": (42, 0, 0, True, 32),
"sparc": ( 2, 0, 0, False, 32),
},
"linux-uclibc" : {
"arm" : ( 40, 97, 0, True, 32),
"armeb": ( 40, 97, 0, False, 32),
"powerpc": ( 20, 0, 0, False, 32),
"i386": ( 3, 0, 0, True, 32),
"i486": ( 3, 0, 0, True, 32),
"i586": ( 3, 0, 0, True, 32),
"i686": ( 3, 0, 0, True, 32),
"x86_64": ( 62, 0, 0, True, 64),
"mips": ( 8, 0, 0, False, 32),
"mipsel": ( 8, 0, 0, True, 32),
"avr32": (6317, 0, 0, False, 32),
"sh4": (42, 0, 0, True, 32),
},
"uclinux-uclibc" : {
"bfin": ( 106, 0, 0, True, 32),
},
"linux-gnueabi" : {
"arm" : (40, 0, 0, True, 32),
"armeb" : (40, 0, 0, False, 32),
},
"linux-uclibceabi" : {
"arm" : (40, 0, 0, True, 32),
"armeb" : (40, 0, 0, False, 32),
},
"linux-gnu" : {
"powerpc": (20, 0, 0, False, 32),
},
"linux-gnuspe" : {
"powerpc": (20, 0, 0, False, 32),
},
"linux-uclibcspe" : {
"powerpc": (20, 0, 0, False, 32),
},
"linux-gnu" : {
"microblaze": (47787, 0, 0, False, 32),
"microblazeel": (47787, 0, 0, True, 32),
},
"linux-gnux32" : {
"x86_64": (62, 0, 0, True, 32),
},
}
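# Worked example (illustrative): package_qa_get_machine_dict()["linux-gnueabi"]["arm"]
# yields (40, 0, 0, True, 32), i.e. EM_ARM, little endian, 32-bit.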
# Currently not used by default: "desktop"
WARN_QA ?= "ldflags useless-rpaths rpaths unsafe-references-in-binaries unsafe-references-in-scripts"
ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch la2 pkgconfig la perms"
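# Illustrative override: promote a check from warning to build error in a conf file:
#   ERROR_QA += "ldflags"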
def package_qa_clean_path(path,d):
""" Remove the common prefix from the path. In this case it is the TMPDIR"""
return path.replace(d.getVar('TMPDIR',True),"")
def package_qa_write_error(error, d):
logfile = d.getVar('QA_LOGFILE', True)
if logfile:
p = d.getVar('P', True)
f = file( logfile, "a+")
print >> f, "%s: %s" % (p, error)
f.close()
def package_qa_handle_error(error_class, error_msg, d):
package_qa_write_error(error_msg, d)
if error_class in (d.getVar("ERROR_QA", True) or "").split():
bb.error("QA Issue: %s" % error_msg)
return False
else:
bb.warn("QA Issue: %s" % error_msg)
return True
QAPATHTEST[rpaths] = "package_qa_check_rpath"
def package_qa_check_rpath(file,name, d, elf, messages):
"""
Check for dangerous RPATHs
"""
if not elf:
return
scanelf = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'scanelf')
bad_dirs = [d.getVar('TMPDIR', True) + "/work", d.getVar('STAGING_DIR_TARGET', True)]
bad_dir_test = d.getVar('TMPDIR', True)
if not os.path.exists(scanelf):
bb.fatal("Can not check RPATH, scanelf (part of pax-utils-native) not found")
if not bad_dirs[0] in d.getVar('WORKDIR', True):
bb.fatal("This class assumed that WORKDIR is ${TMPDIR}/work... Not doing any check")
output = os.popen("%s -B -F%%r#F '%s'" % (scanelf,file))
txt = output.readline().split()
for line in txt:
for dir in bad_dirs:
if dir in line:
messages.append("package %s contains bad RPATH %s in file %s" % (name, line, file))
QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
def package_qa_check_useless_rpaths(file, name, d, elf, messages):
"""
Check for RPATHs that are useless but not dangerous
"""
if not elf:
return
objdump = d.getVar('OBJDUMP', True)
env_path = d.getVar('PATH', True)
libdir = d.getVar("libdir", True)
base_libdir = d.getVar("base_libdir", True)
import re
rpath_re = re.compile("\s+RPATH\s+(.*)")
for line in os.popen("LC_ALL=C PATH=%s %s -p '%s' 2> /dev/null" % (env_path, objdump, file), "r"):
m = rpath_re.match(line)
if m:
rpath = m.group(1)
if rpath == libdir or rpath == base_libdir:
# The dynamic linker searches both these places anyway. There is no point in
# looking there again.
messages.append("%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
QAPATHTEST[dev-so] = "package_qa_check_dev"
def package_qa_check_dev(path, name, d, elf, messages):
"""
Check for ".so" library symlinks in non-dev packages
"""
if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-nativesdk") and path.endswith(".so") and os.path.islink(path):
messages.append("non -dev/-dbg/-nativesdk package contains symlink .so: %s path '%s'" % \
(name, package_qa_clean_path(path,d)))
QAPATHTEST[debug-files] = "package_qa_check_dbg"
def package_qa_check_dbg(path, name, d, elf, messages):
"""
Check for ".debug" files or directories outside of the dbg package
"""
if not "-dbg" in name:
if '.debug' in path.split(os.path.sep):
messages.append("non debug package contains .debug directory: %s path %s" % \
(name, package_qa_clean_path(path,d)))
QAPATHTEST[perms] = "package_qa_check_perm"
def package_qa_check_perm(path,name,d, elf, messages):
"""
Check the permission of files
"""
return
QAPATHTEST[unsafe-references-in-binaries] = "package_qa_check_unsafe_references_in_binaries"
def package_qa_check_unsafe_references_in_binaries(path, name, d, elf, messages):
"""
Ensure binaries in base_[bindir|sbindir|libdir] do not link to files under exec_prefix
"""
if unsafe_references_skippable(path, name, d):
return
if elf:
import subprocess as sub
pn = d.getVar('PN', True)
exec_prefix = d.getVar('exec_prefix', True)
sysroot_path = d.getVar('STAGING_DIR_TARGET', True)
sysroot_path_usr = sysroot_path + exec_prefix
try:
ldd_output = bb.process.Popen(["prelink-rtld", "--root", sysroot_path, path], stdout=sub.PIPE).stdout.read()
except bb.process.CmdError:
error_msg = pn + ": prelink-rtld aborted when processing %s" % path
package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
return False
if sysroot_path_usr in ldd_output:
error_msg = pn + ": %s links to something under exec_prefix" % path
package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
error_msg = "ldd reports: %s" % ldd_output
package_qa_handle_error("unsafe-references-in-binaries", error_msg, d)
return False
QAPATHTEST[unsafe-references-in-scripts] = "package_qa_check_unsafe_references_in_scripts"
def package_qa_check_unsafe_references_in_scripts(path, name, d, elf, messages):
"""
Warn if scripts in base_[bindir|sbindir|libdir] reference files under exec_prefix
"""
if unsafe_references_skippable(path, name, d):
return
if not elf:
import stat
pn = d.getVar('PN', True)
# Ensure we're checking an executable script
statinfo = os.stat(path)
if bool(statinfo.st_mode & stat.S_IXUSR):
# grep shell scripts for possible references to /exec_prefix/
exec_prefix = d.getVar('exec_prefix', True)
statement = "grep -e '%s/' %s > /dev/null" % (exec_prefix, path)
if os.system(statement) == 0:
error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
error_msg = "Shell scripts in base_bindir and base_sbindir should not reference anything in exec_prefix"
package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
def unsafe_references_skippable(path, name, d):
if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d):
return True
if "-dbg" in name or "-dev" in name:
return True
# Other package names to skip:
if name.startswith("kernel-module-"):
return True
# Skip symlinks
if os.path.islink(path):
return True
# Skip unusual rootfs layouts which make these tests irrelevant
exec_prefix = d.getVar('exec_prefix', True)
if exec_prefix == "":
return True
pkgdest = d.getVar('PKGDEST', True)
pkgdest = pkgdest + "/" + name
pkgdest = os.path.abspath(pkgdest)
base_bindir = pkgdest + d.getVar('base_bindir', True)
base_sbindir = pkgdest + d.getVar('base_sbindir', True)
base_libdir = pkgdest + d.getVar('base_libdir', True)
bindir = pkgdest + d.getVar('bindir', True)
sbindir = pkgdest + d.getVar('sbindir', True)
libdir = pkgdest + d.getVar('libdir', True)
if base_bindir == bindir and base_sbindir == sbindir and base_libdir == libdir:
return True
# Skip files not in base_[bindir|sbindir|libdir]
path = os.path.abspath(path)
if not (base_bindir in path or base_sbindir in path or base_libdir in path):
return True
return False
QAPATHTEST[arch] = "package_qa_check_arch"
def package_qa_check_arch(path,name,d, elf, messages):
"""
Check if archs are compatible
"""
if not elf:
return
target_os = d.getVar('TARGET_OS', True)
target_arch = d.getVar('TARGET_ARCH', True)
provides = d.getVar('PROVIDES', True)
# FIXME: Cross package confuse this check, so just skip them
for s in ['cross', 'nativesdk', 'cross-canadian']:
if bb.data.inherits_class(s, d):
return
# avoid following links to /usr/bin (e.g. on udev builds)
# we will check the files pointed to anyway...
if os.path.islink(path):
return
#if this will throw an exception, then fix the dict above
(machine, osabi, abiversion, littleendian, bits) \
= package_qa_get_machine_dict()[target_os][target_arch]
# Check the architecture and endiannes of the binary
if not ((machine == elf.machine()) or \
("virtual/kernel" in provides) and (target_os == "linux-gnux32")):
messages.append("Architecture did not match (%d to %d) on %s" % \
(machine, elf.machine(), package_qa_clean_path(path,d)))
elif not ((bits == elf.abiSize()) or \
("virtual/kernel" in provides) and (target_os == "linux-gnux32")):
messages.append("Bit size did not match (%d to %d) %s on %s" % \
(bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
elif not littleendian == elf.isLittleEndian():
messages.append("Endiannes did not match (%d to %d) on %s" % \
(littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
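# For orientation, package_qa_get_machine_dict() entries are keyed by
# TARGET_OS then TARGET_ARCH and unpack as (machine, osabi, abiversion,
# littleendian, bits). A small sketch of the shape (two entries shown for
# illustration, not the full table):
#   {"linux": {"arm":    (40, 97, 0, True, 32),
#              "x86_64": (62,  0, 0, True, 64)}}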
QAPATHTEST[desktop] = "package_qa_check_desktop"
def package_qa_check_desktop(path, name, d, elf, messages):
"""
Run all desktop files through desktop-file-validate.
"""
if path.endswith(".desktop"):
desktop_file_validate = os.path.join(d.getVar('STAGING_BINDIR_NATIVE',True),'desktop-file-validate')
output = os.popen("%s %s" % (desktop_file_validate, path))
# This only produces output on errors
for l in output:
messages.append("Desktop file issue: " + l.strip())
QAPATHTEST[ldflags] = "package_qa_hash_style"
def package_qa_hash_style(path, name, d, elf, messages):
"""
Check if the binary has the right hash style...
"""
if not elf:
return
if os.path.islink(path):
return
gnu_hash = "--hash-style=gnu" in d.getVar('LDFLAGS', True)
if not gnu_hash:
gnu_hash = "--hash-style=both" in d.getVar('LDFLAGS', True)
if not gnu_hash:
return
objdump = d.getVar('OBJDUMP', True)
env_path = d.getVar('PATH', True)
sane = False
has_syms = False
# If this binary has symbols, we expect it to have GNU_HASH too.
for line in os.popen("LC_ALL=C PATH=%s %s -p '%s' 2> /dev/null" % (env_path, objdump, path), "r"):
if "SYMTAB" in line:
has_syms = True
if "GNU_HASH" in line:
sane = True
if "[mips32]" in line or "[mips64]" in line:
sane = True
if has_syms and not sane:
messages.append("No GNU_HASH in the elf binary: '%s'" % path)
QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
def package_qa_check_buildpaths(path, name, d, elf, messages):
"""
Check for build paths inside target files and error if not found in the whitelist
"""
# Ignore .debug files, not interesting
if path.find(".debug") != -1:
return
# Ignore symlinks
if os.path.islink(path):
return
tmpdir = d.getVar('TMPDIR', True)
file_content = open(path).read()
if tmpdir in file_content:
messages.append("File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
def package_qa_check_license(workdir, d):
"""
Check for changes in the license files
"""
import tempfile
sane = True
lic_files = d.getVar('LIC_FILES_CHKSUM', True)
lic = d.getVar('LICENSE', True)
pn = d.getVar('PN', True)
if lic == "CLOSED":
return True
if not lic_files:
# just throw a warning now. Once licensing data is entered for enough of the recipes,
# this will be converted into an error and False will be returned.
bb.error(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)")
return False
srcdir = d.getVar('S', True)
for url in lic_files.split():
(type, host, path, user, pswd, parm) = bb.decodeurl(url)
srclicfile = os.path.join(srcdir, path)
if not os.path.isfile(srclicfile):
raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile)
if 'md5' not in parm:
bb.error(pn + ": md5 checksum is not specified for ", url)
return False
beginline, endline = 0, 0
if 'beginline' in parm:
beginline = int(parm['beginline'])
if 'endline' in parm:
endline = int(parm['endline'])
if (not beginline) and (not endline):
md5chksum = bb.utils.md5_file(srclicfile)
else:
fi = open(srclicfile, 'r')
fo = tempfile.NamedTemporaryFile(mode='wb', prefix='poky.', suffix='.tmp', delete=False)
tmplicfile = fo.name
lineno = 0
linesout = 0
for line in fi:
lineno += 1
if (lineno >= beginline):
if ((lineno <= endline) or not endline):
fo.write(line)
linesout += 1
else:
break
fo.flush()
fo.close()
fi.close()
md5chksum = bb.utils.md5_file(tmplicfile)
os.unlink(tmplicfile)
if parm['md5'] == md5chksum:
bb.note (pn + ": md5 checksum matched for ", url)
else:
bb.error (pn + ": md5 data is not matching for ", url)
bb.error (pn + ": The new md5 checksum is ", md5chksum)
bb.error (pn + ": Check if the license information has changed in")
sane = False
return sane
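# Usage sketch: a recipe restricting the checksum to part of a file would
# carry something like (hypothetical checksum value):
#   LIC_FILES_CHKSUM = "file://COPYING;beginline=1;endline=20;md5=0123456789abcdef0123456789abcdef"
# Only lines 1..20 of COPYING are hashed, so edits after the license header
# do not invalidate the recorded md5.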
def package_qa_check_staged(path,d):
"""
Check staged .la and .pc files for sanity, e.g. "installed" being false.
As this is run after every stage, we should be able to find the recipe
responsible for the errors easily, even though we look at every .pc and
.la file.
"""
sane = True
tmpdir = d.getVar('TMPDIR', True)
workdir = os.path.join(tmpdir, "work")
installed = "installed=yes"
if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
pkgconfigcheck = workdir
else:
pkgconfigcheck = tmpdir
# find all .la and .pc files
# read the content
# and check for stuff that looks wrong
for root, dirs, files in os.walk(path):
for file in files:
path = os.path.join(root,file)
if file.endswith(".la"):
file_content = open(path).read()
if workdir in file_content:
error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
sane = package_qa_handle_error("la", error_msg, d)
elif file.endswith(".pc"):
file_content = open(path).read()
if pkgconfigcheck in file_content:
error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
sane = package_qa_handle_error("pkgconfig", error_msg, d)
return sane
# Walk over all files in a directory and call func
def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d):
import oe.qa
#if this will throw an exception, then fix the dict above
target_os = d.getVar('TARGET_OS', True)
target_arch = d.getVar('TARGET_ARCH', True)
warnings = []
errors = []
for root, dirs, files in os.walk(path):
for file in files:
path = os.path.join(root,file)
elf = oe.qa.ELFFile(path)
try:
elf.open()
except:
elf = None
for func in warnfuncs:
func(path, package, d, elf, warnings)
for func in errorfuncs:
func(path, package, d, elf, errors)
for w in warnings:
bb.warn("QA Issue: %s" % w)
package_qa_write_error(w, d)
for e in errors:
bb.error("QA Issue: %s" % e)
package_qa_write_error(e, d)
return len(errors) == 0
def package_qa_check_rdepends(pkg, pkgdest, skip, d):
# Don't do this check for kernel/module recipes, there aren't too many debug/development
# packages and you can get false positives e.g. on kernel-module-lirc-dev
if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
return True
sane = True
if not "-dbg" in pkg and not "task-" in pkg and not "-image" in pkg:
# Copied from package_ipk.bbclass
# boiler plate to update the data
localdata = bb.data.createCopy(d)
root = "%s/%s" % (pkgdest, pkg)
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
pkgname = localdata.getVar('PKG_%s' % pkg, True)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', pkg)
bb.data.update_data(localdata)
# Now check the RDEPENDS
rdepends = bb.utils.explode_deps(localdata.getVar('RDEPENDS', True) or "")
# Now do the sanity check!!!
for rdepend in rdepends:
if "-dbg" in rdepend and "debug-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkgname,rdepend)
sane = package_qa_handle_error("debug-deps", error_msg, d)
if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
error_msg = "%s rdepends on %s" % (pkgname, rdepend)
sane = package_qa_handle_error("dev-deps", error_msg, d)
return sane
# The PACKAGE FUNC to scan each package
python do_package_qa () {
bb.note("DO PACKAGE QA")
logdir = d.getVar('T', True)
pkg = d.getVar('PN', True)
# Check the compile log for host contamination
compilelog = os.path.join(logdir,"log.do_compile")
statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % compilelog
if os.system(statement) == 0:
bb.warn("%s: The compile log indicates that host include and/or library paths were used. Please check the log '%s' for more information." % \
(pkg, compilelog))
# Check the install log for host contamination
installlog = os.path.join(logdir,"log.do_install")
statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % installlog
if os.system(statement) == 0:
bb.warn("%s: The install log indicates that host include and/or library paths were used. Please check the log '%s' for more information." % \
(pkg, installlog))
# Scan the packages...
pkgdest = d.getVar('PKGDEST', True)
packages = d.getVar('PACKAGES', True)
# if there are no packages, there is nothing to scan
if not packages:
return
testmatrix = d.getVarFlags("QAPATHTEST")
g = globals()
walk_sane = True
rdepends_sane = True
for package in packages.split():
skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
if skip:
bb.note("Package %s skipping QA tests: %s" % (package, str(skip)))
warnchecks = []
for w in (d.getVar("WARN_QA", True) or "").split():
if w in skip:
continue
if w in testmatrix and testmatrix[w] in g:
warnchecks.append(g[testmatrix[w]])
errorchecks = []
for e in (d.getVar("ERROR_QA", True) or "").split():
if e in skip:
continue
if e in testmatrix and testmatrix[e] in g:
errorchecks.append(g[testmatrix[e]])
bb.note("Checking Package: %s" % package)
path = "%s/%s" % (pkgdest, package)
if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d):
walk_sane = False
if not package_qa_check_rdepends(package, pkgdest, skip, d):
rdepends_sane = False
if not walk_sane or not rdepends_sane:
bb.fatal("QA run found fatal errors. Please consider fixing them.")
bb.note("DONE with PACKAGE QA")
}
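# For reference, each QAPATHTEST flag above names the checker function that
# do_package_qa resolves via d.getVarFlags("QAPATHTEST") and globals().
# Disabling individual tests for one package is then just (illustrative):
#   INSANE_SKIP_${PN} = "ldflags dev-deps"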
python do_qa_staging() {
bb.note("QA checking staging")
if not package_qa_check_staged(bb.data.expand('${SYSROOT_DESTDIR}/${STAGING_LIBDIR}',d), d):
bb.fatal("QA staging was broken by the package built above")
}
python do_qa_configure() {
configs = []
workdir = d.getVar('WORKDIR', True)
bb.note("Checking autotools environment for common misconfiguration")
for root, dirs, files in os.walk(workdir):
statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % \
os.path.join(root,"config.log")
if "config.log" in files:
if os.system(statement) == 0:
bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
Rerun configure task after fixing this. The path was '%s'""" % root)
if "configure.ac" in files:
configs.append(os.path.join(root,"configure.ac"))
if "configure.in" in files:
configs.append(os.path.join(root, "configure.in"))
cnf = d.getVar('EXTRA_OECONF', True) or ""
if "gettext" not in d.getVar('P', True) and "gcc-runtime" not in d.getVar('P', True) and "--disable-nls" not in cnf:
ml = d.getVar("MLPREFIX", True) or ""
if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
gt = "gettext-native"
elif bb.data.inherits_class('cross-canadian', d):
gt = "gettext-nativesdk"
else:
gt = "virtual/" + ml + "gettext"
deps = bb.utils.explode_deps(d.getVar('DEPENDS', True) or "")
if gt not in deps:
for config in configs:
gnu = "grep \"^[[:space:]]*AM_GNU_GETTEXT\" %s >/dev/null" % config
if os.system(gnu) == 0:
bb.fatal("""%s required but not in DEPENDS for file %s.
Missing inherit gettext?""" % (gt, config))
if not package_qa_check_license(workdir, d):
bb.fatal("Licensing Error: LIC_FILES_CHKSUM does not match, please fix")
}
# The Staging Func, to check all staging
#addtask qa_staging after do_populate_sysroot before do_build
do_populate_sysroot[postfuncs] += "do_qa_staging "
# Check broken config.log files, for packages requiring Gettext which don't
# have it in DEPENDS and for correct LIC_FILES_CHKSUM
#addtask qa_configure after do_configure before do_compile
do_configure[postfuncs] += "do_qa_configure "

View File

@@ -0,0 +1,5 @@
do_rootfs[depends] += "insserv-native:do_populate_sysroot"
run_insserv () {
insserv -p ${IMAGE_ROOTFS}/etc/init.d -c ${STAGING_ETCDIR_NATIVE}/insserv.conf
}
ROOTFS_POSTPROCESS_COMMAND += " run_insserv ; "

View File

@@ -0,0 +1,45 @@
#
# set the ARCH environment variable for kernel compilation (including
# modules). return value must match one of the architecture directories
# in the kernel source "arch" directory
#
valid_archs = "alpha cris ia64 \
i386 x86 \
m68knommu m68k ppc powerpc powerpc64 ppc64 \
sparc sparc64 \
arm arm26 \
m32r mips \
sh sh64 um h8300 \
parisc s390 v850 \
avr32 blackfin \
microblaze"
def map_kernel_arch(a, d):
import re
valid_archs = d.getVar('valid_archs', 1).split()
if re.match('(i.86|athlon|x86.64)$', a): return 'x86'
elif re.match('arm26$', a): return 'arm26'
elif re.match('armeb$', a): return 'arm'
elif re.match('mipsel$', a): return 'mips'
elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
elif re.match('sh(3|4)$', a): return 'sh'
elif re.match('bfin', a): return 'blackfin'
elif re.match('microblazeel', a): return 'microblaze'
elif a in valid_archs: return a
else:
bb.error("cannot map '%s' to a linux kernel architecture" % a)
export ARCH = "${@map_kernel_arch(d.getVar('TARGET_ARCH', 1), d)}"
def map_uboot_arch(a, d):
import re
if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
elif re.match('i.86$', a): return 'x86'
return a
export UBOOT_ARCH = "${@map_uboot_arch(d.getVar('ARCH', 1), d)}"
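# Quick illustration of the two mappings (hypothetical inputs):
#   map_kernel_arch("i686", d)     -> "x86"
#   map_kernel_arch("armeb", d)    -> "arm"
#   map_uboot_arch("powerpc64", d) -> "ppc"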

View File

@@ -0,0 +1,270 @@
S = "${WORKDIR}/linux"
def find_patches(d):
patches=src_patches(d)
patch_list=[]
for p in patches:
_, _, local, _, _, _ = bb.decodeurl(p)
patch_list.append(local)
return patch_list
do_patch() {
cd ${S}
if [ -f ${WORKDIR}/defconfig ]; then
defconfig=${WORKDIR}/defconfig
fi
# If kernel tools are available in-tree, they are preferred and are
# placed on the path before any external tools, unless the external
# tools flag is set, in which case we do nothing.
if [ -f "${S}/scripts/util/configme" ]; then
if [ -z "${EXTERNAL_KERNEL_TOOLS}" ]; then
PATH=${S}/scripts/util:${PATH}
fi
fi
kbranch=${KBRANCH}
if [ -n "${YOCTO_KERNEL_EXTERNAL_BRANCH}" ]; then
# switch from a generic to a specific branch
kbranch=${YOCTO_KERNEL_EXTERNAL_BRANCH}
fi
# simply ensures that a branch of the right name has been created
if [ -n "${YOCTO_KERNEL_META_DATA}" ]; then
createme_flags="--disable-meta-gen"
fi
createme ${createme_flags} ${ARCH} ${kbranch} ${defconfig}
if [ $? -ne 0 ]; then
echo "ERROR. Could not create ${kbranch}"
exit 1
fi
patches="${@" ".join(find_patches(d))}"
# This loops through all patches, and looks for directories that do
# not already have feature descriptions. If a directory doesn't have
# a feature description, we switch to the ${WORKDIR} variant of the
# feature (so we can write to it) and generate a feature for those
# patches. The generated feature will respect the patch order.
#
# By leaving source patch directories that already have .scc files
# as-is it means that a SRC_URI can only contain a .scc file, and all
# patches that the .scc references will be picked up, without having
# to be repeated on the SRC_URI line .. which is more intuitive
set +e
patch_dirs=
for p in ${patches}; do
pdir=`dirname ${p}`
pname=`basename ${p}`
scc=`find ${pdir} -maxdepth 1 -name '*.scc'`
if [ -z "${scc}" ]; then
# there is no scc file. We need to switch to someplace that we know
# we can create content (the workdir)
workdir_subdir=`echo ${pdir} | sed "s%^.*/${PN}%%" | sed 's%^/%%'`
suggested_dir="${WORKDIR}/${workdir_subdir}"
echo ${gen_feature_dirs} | grep -q ${suggested_dir}
if [ $? -ne 0 ]; then
gen_feature_dirs="${gen_feature_dirs} ${suggested_dir}"
fi
# we call the file *.scc_tmp, so the test above will continue to find
# that patches from a common subdirectory don't have a scc file and
# they'll be placed in order, into this file. We'll rename it later.
echo "patch ${pname}" >> ${suggested_dir}/gen_${workdir_subdir}_desc.scc_tmp
else
suggested_dir="${pdir}"
fi
echo ${patch_dirs} | grep -q ${suggested_dir}
if [ $? -ne 0 ]; then
patch_dirs="${patch_dirs} ${suggested_dir}"
fi
done
# go through the patch directories and look for any scc feature files
# that were constructed above. If one is found, rename it to ".scc" so
# the kernel patching can see it.
for pdir in ${patch_dirs}; do
scc=`find ${pdir} -maxdepth 1 -name '*.scc_tmp'`
if [ -n "${scc}" ]; then
new_scc=`echo ${scc} | sed 's/_tmp//'`
mv -f ${scc} ${new_scc}
fi
done
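# Example of the net effect (assumed layout): if a.patch and b.patch share a
# directory with no .scc file, the loops above leave behind a generated
# feature description containing, in patch order:
#   patch a.patch
#   patch b.patch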
patch_dirs="${patch_dirs} ${WORKDIR}"
# add any explicitly referenced features onto the end of the feature
# list that is passed to the kernel build scripts.
if [ -n "${KERNEL_FEATURES}" ]; then
for feat in ${KERNEL_FEATURES}; do
addon_features="$addon_features --feature $feat"
done
fi
# updates or generates the target description
updateme --branch ${kbranch} -DKDESC=${KMACHINE}:${LINUX_KERNEL_TYPE} \
${addon_features} ${ARCH} ${KMACHINE} ${patch_dirs}
if [ $? -ne 0 ]; then
echo "ERROR. Could not update ${kbranch}"
exit 1
fi
# executes and modifies the source tree as required
patchme ${kbranch}
if [ $? -ne 0 ]; then
echo "ERROR. Could not modify ${kbranch}"
exit 1
fi
}
do_kernel_checkout() {
if [ -d ${WORKDIR}/git/.git/refs/remotes/origin ]; then
echo "Fixing up git directory for ${LINUX_KERNEL_TYPE}/${KMACHINE}"
rm -rf ${S}
mkdir ${S}
mv ${WORKDIR}/git/.git ${S}
if [ -e ${S}/.git/packed-refs ]; then
cd ${S}
rm -f .git/refs/remotes/origin/HEAD
IFS='
';
for r in `git show-ref | grep remotes`; do
ref=`echo $r | cut -d' ' -f1`;
b=`echo $r | cut -d' ' -f2 | sed 's%refs/remotes/origin/%%'`;
dir=`dirname $b`
mkdir -p .git/refs/heads/$dir
echo $ref > .git/refs/heads/$b
done
cd ..
else
cp -r ${S}/.git/refs/remotes/origin/* ${S}/.git/refs/heads
rmdir ${S}/.git/refs/remotes/origin
fi
fi
cd ${S}
set +e
git show-ref --quiet --verify -- "refs/heads/${KBRANCH}"
if [ $? -eq 0 ]; then
# checkout and clobber any unimportant files
git checkout -f ${KBRANCH}
else
echo "Not checking out ${KBRANCH}, it will be created later"
git checkout -f master
fi
}
do_kernel_checkout[dirs] = "${S}"
addtask kernel_checkout before do_patch after do_unpack
do_kernel_configme[dirs] = "${CCACHE_DIR} ${S} ${B}"
do_kernel_configme() {
echo "[INFO] doing kernel configme"
if [ -n "${KCONFIG_MODE}" ]; then
configmeflags=${KCONFIG_MODE}
else
# If a defconfig was passed, use =n as the baseline, which is achieved
# via --allnoconfig
if [ -f ${WORKDIR}/defconfig ]; then
configmeflags="--allnoconfig"
fi
fi
cd ${S}
PATH=${PATH}:${S}/scripts/util
configme ${configmeflags} --reconfig --output ${B} ${KBRANCH} ${KMACHINE}
if [ $? -ne 0 ]; then
echo "ERROR. Could not configure ${KMACHINE}-${LINUX_KERNEL_TYPE}"
exit 1
fi
echo "# Global settings from linux recipe" >> ${B}/.config
echo "CONFIG_LOCALVERSION="\"${LINUX_VERSION_EXTENSION}\" >> ${B}/.config
}
python do_kernel_configcheck() {
import bb, re, string, sys, commands
bb.plain("NOTE: validating kernel configuration")
pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH', True), "${S}/scripts/util/")
cmd = bb.data.expand("cd ${B}/..; kconf_check -config- ${B} ${S} ${B} ${KBRANCH}",d )
ret, result = commands.getstatusoutput("%s%s" % (pathprefix, cmd))
bb.plain( "%s" % result )
}
# Ensure that the branches (BSP and meta) are on the locations specified by
# their SRCREV values. If they are NOT on the right commits, the branches
# are reset to the correct commit.
do_validate_branches() {
cd ${S}
# nothing to do if bootstrapping
if [ -n "${YOCTO_KERNEL_EXTERNAL_BRANCH}" ]; then
return
fi
# nothing to do if SRCREV is AUTOREV
if [ "${SRCREV_machine}" = "AUTOINC" ]; then
# restore the branch for builds
git checkout -f ${KBRANCH}
return
fi
branch_head=`git show-ref -s --heads ${KBRANCH}`
meta_head=`git show-ref -s --heads ${KMETA}`
target_branch_head="${SRCREV_machine}"
target_meta_head="${SRCREV_meta}"
current=`git branch |grep \*|sed 's/^\* //'`
if [ -n "$target_branch_head" ] && [ "$branch_head" != "$target_branch_head" ]; then
if [ -n "${KERNEL_REVISION_CHECKING}" ]; then
ref=`git show ${target_branch_head} 2>&1 | head -n1 || true`
if [ "$ref" = "fatal: bad object ${target_branch_head}" ]; then
echo "ERROR ${target_branch_head} is not a valid commit ID."
echo "The kernel source tree may be out of sync"
exit 1
else
echo "Forcing branch $current to ${target_branch_head}"
git branch -m $current $current-orig
git checkout -b $current ${target_branch_head}
fi
fi
fi
if [ "$meta_head" != "$target_meta_head" ]; then
if [ -n "${KERNEL_REVISION_CHECKING}" ]; then
ref=`git show ${target_meta_head} 2>&1 | head -n1 || true`
if [ "$ref" = "fatal: bad object ${target_meta_head}" ]; then
echo "ERROR ${target_meta_head} is not a valid commit ID"
echo "The kernel source tree may be out of sync"
exit 1
else
echo "Forcing branch meta to ${target_meta_head}"
git branch -m ${KMETA} ${KMETA}-orig
git checkout -b ${KMETA} ${target_meta_head}
fi
fi
fi
# restore the branch for builds
git checkout -f ${KBRANCH}
}
# Many scripts want to look in arch/$arch/boot for the bootable
# image. This poses a problem for vmlinux based booting. This
# task arranges to have vmlinux appear in the normalized directory
# location.
do_kernel_link_vmlinux() {
if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
mkdir ${B}/arch/${ARCH}/boot
fi
cd ${B}/arch/${ARCH}/boot
ln -sf ../../../vmlinux
}

View File

@@ -0,0 +1,530 @@
inherit linux-kernel-base module_strip
PROVIDES += "virtual/kernel"
DEPENDS += "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}depmod virtual/${TARGET_PREFIX}gcc${KERNEL_CCSUFFIX} update-modules"
# we include gcc above, we don't need virtual/libc
INHIBIT_DEFAULT_DEPS = "1"
KERNEL_IMAGETYPE ?= "zImage"
INITRAMFS_IMAGE ?= ""
INITRAMFS_TASK ?= ""
python __anonymous () {
kerneltype = d.getVar('KERNEL_IMAGETYPE', 1) or ''
if kerneltype == 'uImage':
depends = d.getVar("DEPENDS", 1)
depends = "%s u-boot-mkimage-native" % depends
d.setVar("DEPENDS", depends)
image = d.getVar('INITRAMFS_IMAGE', True)
if image:
d.setVar('INITRAMFS_TASK', '${INITRAMFS_IMAGE}:do_rootfs')
}
inherit kernel-arch deploy
PACKAGES_DYNAMIC += "kernel-module-*"
PACKAGES_DYNAMIC += "kernel-image-*"
PACKAGES_DYNAMIC += "kernel-firmware-*"
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
KERNEL_PRIORITY = "${@d.getVar('PV',1).split('-')[0].split('.')[-1]}"
KERNEL_RELEASE ?= "${KERNEL_VERSION}"
KERNEL_CCSUFFIX ?= ""
KERNEL_LDSUFFIX ?= ""
# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
# specific options necessary for building the kernel and modules.
#FIXME: should be this: TARGET_CC_KERNEL_ARCH ?= "${TARGET_CC_ARCH}"
TARGET_CC_KERNEL_ARCH ?= ""
HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
TARGET_LD_KERNEL_ARCH ?= ""
HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}${TOOLCHAIN_OPTIONS}"
KERNEL_LD = "${HOST_PREFIX}ld${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}${TOOLCHAIN_OPTIONS}"
# Where the built kernel lies in the kernel tree
KERNEL_OUTPUT ?= "arch/${ARCH}/boot/${KERNEL_IMAGETYPE}"
KERNEL_IMAGEDEST = "boot"
#
# configuration
#
export CMDLINE_CONSOLE = "console=${@d.getVar("KERNEL_CONSOLE",1) or "ttyS0"}"
KERNEL_VERSION = "${@get_kernelversion('${B}')}"
KERNEL_LOCALVERSION ?= ""
# kernels are generally machine specific
PACKAGE_ARCH = "${MACHINE_ARCH}"
# U-Boot support
UBOOT_ENTRYPOINT ?= "20008000"
UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}"
# For the kernel, we don't want the '-e MAKEFLAGS=' in EXTRA_OEMAKE.
# We don't want to override kernel Makefile variables from the environment
EXTRA_OEMAKE = ""
KERNEL_ALT_IMAGETYPE ??= ""
KERNEL_IMAGETYPE_FOR_MAKE = "${@(lambda s: s[:-3] if s[-3:] == ".gz" else s)(d.getVar('KERNEL_IMAGETYPE', 1))}"
kernel_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
oe_runmake include/linux/version.h CC="${KERNEL_CC}" LD="${KERNEL_LD}"
oe_runmake ${KERNEL_IMAGETYPE_FOR_MAKE} ${KERNEL_ALT_IMAGETYPE} CC="${KERNEL_CC}" LD="${KERNEL_LD}"
if test "${KERNEL_IMAGETYPE_FOR_MAKE}.gz" = "${KERNEL_IMAGETYPE}"; then
gzip -9c < "${KERNEL_IMAGETYPE_FOR_MAKE}" > "${KERNEL_OUTPUT}"
fi
}
do_compile_kernelmodules() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}"
else
bbnote "no modules to compile"
fi
}
addtask compile_kernelmodules after do_compile before do_install
kernel_do_install() {
#
# First install the modules
#
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" modules_install
rm -f "${D}/lib/modules/${KERNEL_VERSION}/modules.order"
rm -f "${D}/lib/modules/${KERNEL_VERSION}/modules.builtin"
else
bbnote "no modules to install"
fi
#
# Install various kernel output (zImage, map file, config, module support files)
#
install -d ${D}/${KERNEL_IMAGEDEST}
install -d ${D}/boot
install -m 0644 ${KERNEL_OUTPUT} ${D}/${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE}-${KERNEL_VERSION}
install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION}
install -m 0644 vmlinux ${D}/boot/vmlinux-${KERNEL_VERSION}
[ -e Module.symvers ] && install -m 0644 Module.symvers ${D}/boot/Module.symvers-${KERNEL_VERSION}
install -d ${D}/etc/modutils
install -d ${D}/etc/modprobe.d
#
# Support for external module building - create a minimal copy of the
# kernel source tree.
#
kerneldir=${D}/kernel
install -d $kerneldir
#
# Store the kernel version in sysroots for module-base.bbclass
#
echo "${KERNEL_VERSION}" > $kerneldir/kernel-abiversion
#
# Store kernel image name to allow use during image generation
#
echo "${KERNEL_IMAGE_BASE_NAME}" >$kerneldir/kernel-image-name
#
# Copy the entire source tree. In case an external build directory is
# used, copy the build directory over first, then copy over the source
# dir. This ensures the original Makefiles are used and not the
# redirecting Makefiles in the build directory.
#
# work and sysroots can be on different partitions, so we can't rely on
# hardlinking, unfortunately.
#
cp -fR * $kerneldir
cp .config $kerneldir
if [ "${S}" != "${B}" ]; then
cp -fR ${S}/* $kerneldir
fi
install -m 0644 ${KERNEL_OUTPUT} $kerneldir/${KERNEL_IMAGETYPE}
install -m 0644 System.map $kerneldir/System.map-${KERNEL_VERSION}
#
# Clean and remove files not needed for building modules.
# Some distributions go through a lot more trouble to strip out
# unnecessary headers; for now, we just prune the obvious bits.
#
# We don't want to leave host-arch binaries in /sysroots, so
# we clean the scripts dir while leaving the generated config
# and include files.
#
oe_runmake -C $kerneldir CC="${KERNEL_CC}" LD="${KERNEL_LD}" clean
make -C $kerneldir _mrproper_scripts
find $kerneldir -path $kerneldir/scripts -prune -o -name "*.[csS]" -exec rm '{}' \;
find $kerneldir/Documentation -name "*.txt" -exec rm '{}' \;
# As of Linux kernel version 3.0.1, the clean target removes
# arch/powerpc/lib/crtsavres.o which is present in
# KBUILD_LDFLAGS_MODULE, making it required to build external modules.
if [ ${ARCH} = "powerpc" ]; then
cp arch/powerpc/lib/crtsavres.o $kerneldir/arch/powerpc/lib/crtsavres.o
fi
# Remove the following binaries which cause strip errors
# during do_package for cross-compiled platforms
bin_files="arch/powerpc/boot/addnote arch/powerpc/boot/hack-coff \
arch/powerpc/boot/mktree"
for entry in $bin_files; do
rm -f $kerneldir/$entry
done
}
PACKAGE_PREPROCESS_FUNCS += "kernel_package_preprocess"
kernel_package_preprocess () {
rm -rf ${PKGD}/kernel
}
sysroot_stage_all_append() {
sysroot_stage_dir ${D}/kernel ${SYSROOT_DESTDIR}/kernel
}
kernel_do_configure() {
# Copy defconfig to .config if .config does not exist. This allows
# recipes to manage the .config themselves in do_configure_prepend().
if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
cp "${WORKDIR}/defconfig" "${B}/.config"
fi
yes '' | oe_runmake oldconfig
if [ ! -z "${INITRAMFS_IMAGE}" ]; then
for img in cpio.gz cpio.lzo cpio.lzma cpio.xz; do
if [ -e "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" ]; then
cp "${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE}-${MACHINE}.$img" initramfs.$img
fi
done
fi
}
do_configure[depends] += "${INITRAMFS_TASK}"
do_savedefconfig() {
oe_runmake savedefconfig
}
do_savedefconfig[nostamp] = "1"
addtask savedefconfig after do_configure
pkg_postinst_kernel-base () {
cd /${KERNEL_IMAGEDEST}; update-alternatives --install /${KERNEL_IMAGEDEST}/${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} ${KERNEL_PRIORITY} || true
}
pkg_postrm_kernel-base () {
cd /${KERNEL_IMAGEDEST}; update-alternatives --remove ${KERNEL_IMAGETYPE} ${KERNEL_IMAGETYPE}-${KERNEL_VERSION} || true
}
inherit cml1
EXPORT_FUNCTIONS do_compile do_install do_configure
# kernel-base becomes kernel-${KERNEL_VERSION}
# kernel-image becomes kernel-image-${KERNEL_VERSION}
PACKAGES = "kernel kernel-base kernel-image kernel-dev kernel-vmlinux kernel-misc"
FILES = ""
FILES_kernel-image = "/boot/${KERNEL_IMAGETYPE}*"
FILES_kernel-dev = "/boot/System.map* /boot/Module.symvers* /boot/config*"
FILES_kernel-vmlinux = "/boot/vmlinux*"
# misc is a package to contain files we need in staging
FILES_kernel-misc = "/kernel/include/config /kernel/scripts /kernel/drivers/crypto /kernel/drivers/media"
RDEPENDS_kernel = "kernel-base"
# Allow machines to override this dependency if kernel image files are
# not wanted in images as standard
RDEPENDS_kernel-base ?= "kernel-image"
PKG_kernel-image = "kernel-image-${@legitimize_package_name('${KERNEL_VERSION}')}"
PKG_kernel-base = "kernel-${@legitimize_package_name('${KERNEL_VERSION}')}"
ALLOW_EMPTY_kernel = "1"
ALLOW_EMPTY_kernel-base = "1"
ALLOW_EMPTY_kernel-image = "1"
pkg_postinst_kernel-image () {
if [ ! -e "$D/lib/modules/${KERNEL_VERSION}" ]; then
mkdir -p $D/lib/modules/${KERNEL_VERSION}
fi
if [ -n "$D" ]; then
${HOST_PREFIX}depmod -A -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_VERSION} ${KERNEL_VERSION}
else
depmod -a
fi
}
pkg_postinst_modules () {
if [ -n "$D" ]; then
${HOST_PREFIX}depmod -A -b $D -F ${STAGING_KERNEL_DIR}/System.map-${KERNEL_VERSION} ${KERNEL_VERSION}
else
depmod -a
update-modules || true
fi
}
pkg_postrm_modules () {
update-modules || true
}
autoload_postinst_fragment() {
if [ x"$D" = "x" ]; then
modprobe %s || true
fi
}
# autoload defaults (alphabetically sorted)
module_autoload_hidp = "hidp"
module_autoload_ipv6 = "ipv6"
module_autoload_ipsec = "ipsec"
module_autoload_ircomm-tty = "ircomm-tty"
module_autoload_rfcomm = "rfcomm"
module_autoload_sa1100-rtc = "sa1100-rtc"
# sa1100-rtc was renamed in 2.6.23 onwards
module_autoload_rtc-sa1100 = "rtc-sa1100"
# alias defaults (alphabetically sorted)
module_conf_af_packet = "alias net-pf-17 af_packet"
module_conf_bluez = "alias net-pf-31 bluez"
module_conf_bnep = "alias bt-proto-4 bnep"
module_conf_hci_uart = "alias tty-ldisc-15 hci_uart"
module_conf_l2cap = "alias bt-proto-0 l2cap"
module_conf_sco = "alias bt-proto-2 sco"
module_conf_rfcomm = "alias bt-proto-3 rfcomm"
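# A recipe or machine config can extend these in the same style, e.g.
# (hypothetical module):
#   module_autoload_g_ether = "g_ether"
#   module_conf_g_ether = "options g_ether use_eem=0"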
python populate_packages_prepend () {
def extract_modinfo(file):
import tempfile, re
tempfile.tempdir = d.getVar("WORKDIR", 1)
tf = tempfile.mkstemp()
tmpfile = tf[1]
cmd = "PATH=\"%s\" %sobjcopy -j .modinfo -O binary %s %s" % (d.getVar("PATH", 1), d.getVar("HOST_PREFIX", 1) or "", file, tmpfile)
os.system(cmd)
f = open(tmpfile)
l = f.read().split("\000")
f.close()
os.close(tf[0])
os.unlink(tmpfile)
exp = re.compile("([^=]+)=(.*)")
vals = {}
for i in l:
m = exp.match(i)
if not m:
continue
vals[m.group(1)] = m.group(2)
return vals
def parse_depmod():
import re
dvar = d.getVar('PKGD', 1)
if not dvar:
bb.error("PKGD not defined")
return
kernelver = d.getVar('KERNEL_VERSION', 1)
kernelver_stripped = kernelver
m = re.match('^(.*-hh.*)[\.\+].*$', kernelver)
if m:
kernelver_stripped = m.group(1)
path = d.getVar("PATH", 1)
host_prefix = d.getVar("HOST_PREFIX", 1) or ""
cmd = "PATH=\"%s\" %sdepmod -n -a -r -b %s -F %s/boot/System.map-%s %s" % (path, host_prefix, dvar, dvar, kernelver, kernelver_stripped)
f = os.popen(cmd, 'r')
deps = {}
pattern0 = "^(.*\.k?o):..*$"
pattern1 = "^(.*\.k?o):\s*(.*\.k?o)\s*$"
pattern2 = "^(.*\.k?o):\s*(.*\.k?o)\s*\\\$"
pattern3 = "^\t(.*\.k?o)\s*\\\$"
pattern4 = "^\t(.*\.k?o)\s*$"
line = f.readline()
while line:
if not re.match(pattern0, line):
line = f.readline()
continue
m1 = re.match(pattern1, line)
if m1:
deps[m1.group(1)] = m1.group(2).split()
else:
m2 = re.match(pattern2, line)
if m2:
deps[m2.group(1)] = m2.group(2).split()
line = f.readline()
m3 = re.match(pattern3, line)
while m3:
deps[m2.group(1)].extend(m3.group(1).split())
line = f.readline()
m3 = re.match(pattern3, line)
m4 = re.match(pattern4, line)
deps[m2.group(1)].extend(m4.group(1).split())
line = f.readline()
f.close()
return deps
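# parse_depmod() consumes 'depmod -n' style output; a minimal sample of the
# format the patterns above are written for (module names illustrative,
# continuation lines tab-indented in the real output):
#   kernel/drivers/net/a.ko: kernel/net/b.ko \
#       kernel/net/c.ko
#   kernel/net/b.ko: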
def get_dependencies(file, pattern, format):
# file no longer includes PKGD
file = file.replace(d.getVar('PKGD', 1) or '', '', 1)
# instead is prefixed with /lib/modules/${KERNEL_VERSION}
file = file.replace("/lib/modules/%s/" % d.getVar('KERNEL_VERSION', 1) or '', '', 1)
if module_deps.has_key(file):
import re
dependencies = []
for i in module_deps[file]:
m = re.match(pattern, os.path.basename(i))
if not m:
continue
on = legitimize_package_name(m.group(1))
dependency_pkg = format % on
dependencies.append(dependency_pkg)
return dependencies
return []
def frob_metadata(file, pkg, pattern, format, basename):
import re
vals = extract_modinfo(file)
dvar = d.getVar('PKGD', 1)
# If autoloading is requested, output /etc/modutils/<name> and append
# appropriate modprobe commands to the postinst
autoload = d.getVar('module_autoload_%s' % basename, 1)
if autoload:
name = '%s/etc/modutils/%s' % (dvar, basename)
f = open(name, 'w')
for m in autoload.split():
f.write('%s\n' % m)
f.close()
postinst = d.getVar('pkg_postinst_%s' % pkg, 1)
if not postinst:
bb.fatal("pkg_postinst_%s not defined" % pkg)
postinst += d.getVar('autoload_postinst_fragment', 1) % autoload
d.setVar('pkg_postinst_%s' % pkg, postinst)
# Write out any modconf fragment
modconf = d.getVar('module_conf_%s' % basename, 1)
if modconf:
name = '%s/etc/modprobe.d/%s.conf' % (dvar, basename)
f = open(name, 'w')
f.write("%s\n" % modconf)
f.close()
files = d.getVar('FILES_%s' % pkg, 1)
files = "%s /etc/modutils/%s /etc/modutils/%s.conf /etc/modprobe.d/%s.conf" % (files, basename, basename, basename)
d.setVar('FILES_%s' % pkg, files)
if vals.has_key("description"):
old_desc = d.getVar('DESCRIPTION_' + pkg, 1) or ""
d.setVar('DESCRIPTION_' + pkg, old_desc + "; " + vals["description"])
rdepends_str = d.getVar('RDEPENDS_' + pkg, 1)
if rdepends_str:
rdepends = rdepends_str.split()
else:
rdepends = []
rdepends.extend(get_dependencies(file, pattern, format))
d.setVar('RDEPENDS_' + pkg, ' '.join(rdepends))
module_deps = parse_depmod()
module_regex = '^(.*)\.k?o$'
module_pattern = 'kernel-module-%s'
postinst = d.getVar('pkg_postinst_modules', 1)
postrm = d.getVar('pkg_postrm_modules', 1)
do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.bin$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.fw$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.cis$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
do_split_packages(d, root='/lib/modules', file_regex=module_regex, output_pattern=module_pattern, description='%s kernel module', postinst=postinst, postrm=postrm, recursive=True, hook=frob_metadata, extra_depends='update-modules kernel-%s' % d.getVar("KERNEL_VERSION", 1))
import re
metapkg = "kernel-modules"
d.setVar('ALLOW_EMPTY_' + metapkg, "1")
d.setVar('FILES_' + metapkg, "")
blacklist = [ 'kernel-dev', 'kernel-image', 'kernel-base', 'kernel-vmlinux', 'perf', 'perf-dbg', 'kernel-misc' ]
for l in module_deps.values():
for i in l:
pkg = module_pattern % legitimize_package_name(re.match(module_regex, os.path.basename(i)).group(1))
blacklist.append(pkg)
metapkg_rdepends = []
packages = d.getVar('PACKAGES', 1).split()
for pkg in packages[1:]:
if not pkg in blacklist and not pkg in metapkg_rdepends:
metapkg_rdepends.append(pkg)
d.setVar('RDEPENDS_' + metapkg, ' '.join(metapkg_rdepends))
d.setVar('DESCRIPTION_' + metapkg, 'Kernel modules meta package')
packages.append(metapkg)
d.setVar('PACKAGES', ' '.join(packages))
}
# Support checking the kernel size since some kernels need to reside in partitions
# with a fixed length or there is a limit in transferring the kernel to memory
do_sizecheck() {
if [ ! -z "${KERNEL_IMAGE_MAXSIZE}" ]; then
size=`ls -l ${KERNEL_OUTPUT} | awk '{ print $5}'`
if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
rm ${KERNEL_OUTPUT}
die "This kernel (size=$size > ${KERNEL_IMAGE_MAXSIZE}) is too big for your device. Please reduce the size of the kernel by making more of it modular."
fi
fi
}
addtask sizecheck before do_install after do_compile
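# Machine configurations opt in by giving a byte limit, e.g. a 4 MB kernel
# partition (illustrative value):
#   KERNEL_IMAGE_MAXSIZE = "4194304"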
KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PV}-${PR}-${MACHINE}-${DATETIME}"
# Don't include the DATETIME variable in the sstate package signatures
KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
KERNEL_IMAGE_SYMLINK_NAME ?= "${KERNEL_IMAGETYPE}-${MACHINE}"
kernel_do_deploy() {
install -m 0644 ${KERNEL_OUTPUT} ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
if (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
tar -cvzf ${DEPLOYDIR}/modules-${KERNEL_VERSION}-${PR}-${MACHINE}.tgz -C ${D} lib
fi
if test "x${KERNEL_IMAGETYPE}" = "xuImage" ; then
if test -e arch/${ARCH}/boot/uImage ; then
cp arch/${ARCH}/boot/uImage ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
elif test -e arch/${ARCH}/boot/compressed/vmlinux ; then
${OBJCOPY} -O binary -R .note -R .comment -S arch/${ARCH}/boot/compressed/vmlinux linux.bin
uboot-mkimage -A ${ARCH} -O linux -T kernel -C none -a ${UBOOT_ENTRYPOINT} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
rm -f linux.bin
else
${OBJCOPY} -O binary -R .note -R .comment -S vmlinux linux.bin
rm -f linux.bin.gz
gzip -9 linux.bin
uboot-mkimage -A ${ARCH} -O linux -T kernel -C gzip -a ${UBOOT_ENTRYPOINT} -e ${UBOOT_ENTRYPOINT} -n "${DISTRO_NAME}/${PV}/${MACHINE}" -d linux.bin.gz ${DEPLOYDIR}/${KERNEL_IMAGE_BASE_NAME}.bin
rm -f linux.bin.gz
fi
fi
cd ${DEPLOYDIR}
rm -f ${KERNEL_IMAGE_SYMLINK_NAME}.bin
ln -sf ${KERNEL_IMAGE_BASE_NAME}.bin ${KERNEL_IMAGE_SYMLINK_NAME}.bin
cp ${COREBASE}/meta/files/deploydir_readme.txt ${DEPLOYDIR}/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt
}
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
addtask deploy before do_build after do_install
EXPORT_FUNCTIONS do_deploy
# perf must be enabled in individual kernel recipes
PACKAGES =+ "perf-dbg perf"
FILES_perf = "${bindir}/* \
${libexecdir}"
FILES_perf-dbg = "${FILES_${PN}-dbg}"

View File

@@ -0,0 +1,10 @@
PACKAGES += "${PN}-bin"
FILES_${PN} = "${libexecdir} ${libdir}/lib*${SOLIBS} \
${sysconfdir} ${sharedstatedir} ${localstatedir} \
${base_libdir}/*${SOLIBS} \
${datadir}/${PN} ${libdir}/${PN}"
FILES_${PN}-dev = "${includedir} ${libdir}/lib*${SOLIBSDEV} ${libdir}/*.la \
${libdir}/*.o ${libdir}/pkgconfig /lib/*.o \
${datadir}/aclocal ${bindir}/*-config"
FILES_${PN}-bin = "${bindir}/* ${sbindir}/* /bin/* /sbin/*"

View File

@@ -0,0 +1,30 @@
do_install() {
oe_runmake install_root=${D} install
for r in ${rpcsvc}; do
h=`echo $r|sed -e's,\.x$,.h,'`
install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
done
install -m 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/
install -d ${D}${libdir}/locale
make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
# get rid of some broken files...
for i in ${GLIBC_BROKEN_LOCALES}; do
grep -v $i ${WORKDIR}/SUPPORTED > ${WORKDIR}/SUPPORTED.tmp
mv ${WORKDIR}/SUPPORTED.tmp ${WORKDIR}/SUPPORTED
done
rm -f ${D}${sysconfdir}/rpc
rm -rf ${D}${datadir}/zoneinfo
rm -rf ${D}${libexecdir}/getconf
}
def get_libc_fpu_setting(bb, d):
if d.getVar('TARGET_FPU', 1) in [ 'soft' ]:
return "--without-fp"
return ""
python populate_packages_prepend () {
if d.getVar('DEBIAN_NAMES', 1):
bpn = d.getVar('BPN', 1)
d.setVar('PKG_'+bpn, 'libc6')
d.setVar('PKG_'+bpn+'-dev', 'libc6-dev')
}

View File

@@ -0,0 +1,377 @@
#
# This class knows how to package up [e]glibc. It's shared since prebuilt binary toolchains
# may need packaging and it's pointless to duplicate this code.
#
# Caller should set GLIBC_INTERNAL_USE_BINARY_LOCALE to one of:
# "compile" - Use QEMU to generate the binary locale files
# "precompiled" - The binary locale files are pregenerated and already present
# "ondevice" - The device will build the locale files upon first boot through the postinst
GLIBC_INTERNAL_USE_BINARY_LOCALE ?= "ondevice"
python __anonymous () {
enabled = d.getVar("ENABLE_BINARY_LOCALE_GENERATION", True)
pn = d.getVar("PN", True)
if pn.endswith("-initial"):
enabled = False
if enabled and int(enabled):
import re
target_arch = d.getVar("TARGET_ARCH", True)
binary_arches = d.getVar("BINARY_LOCALE_ARCHES", True) or ""
use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or ""
for regexp in binary_arches.split(" "):
r = re.compile(regexp)
if r.match(target_arch):
depends = d.getVar("DEPENDS", True)
if use_cross_localedef == "1" :
depends = "%s cross-localedef-native" % depends
else:
depends = "%s qemu-native" % depends
d.setVar("DEPENDS", depends)
d.setVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", "compile")
break
}
OVERRIDES_append = ":${TARGET_ARCH}-${TARGET_OS}"
do_configure_prepend() {
sed -e "s#@BASH@#/bin/sh#" -i ${S}/elf/ldd.bash.in
}
# indentation removed on purpose
locale_base_postinst() {
#!/bin/sh
if [ "x$D" != "x" ]; then
exit 1
fi
rm -rf ${TMP_LOCALE}
mkdir -p ${TMP_LOCALE}
if [ -f ${libdir}/locale/locale-archive ]; then
cp ${libdir}/locale/locale-archive ${TMP_LOCALE}/
fi
localedef --inputfile=${datadir}/i18n/locales/%s --charmap=%s --prefix=/tmp/locale %s
mkdir -p ${libdir}/locale/
mv ${TMP_LOCALE}/locale-archive ${libdir}/locale/
rm -rf ${TMP_LOCALE}
}
# indentation removed on purpose
locale_base_postrm() {
#!/bin/sh
rm -rf ${TMP_LOCALE}
mkdir -p ${TMP_LOCALE}
if [ -f ${libdir}/locale/locale-archive ]; then
cp ${libdir}/locale/locale-archive ${TMP_LOCALE}/
fi
localedef --delete-from-archive --inputfile=${datadir}/locales/%s --charmap=%s --prefix=/tmp/locale %s
mv ${TMP_LOCALE}/locale-archive ${libdir}/locale/
rm -rf ${TMP_LOCALE}
}
TMP_LOCALE="/tmp/locale${libdir}/locale"
LOCALETREESRC ?= "{PKGD}"
do_prep_locale_tree() {
treedir=${WORKDIR}/locale-tree
rm -rf $treedir
mkdir -p $treedir/${base_bindir} $treedir/${base_libdir} $treedir/${datadir} $treedir/${libdir}/locale
tar -cf - -C ${LOCALETREESRC}${datadir} -ps i18n | tar -xf - -C $treedir/${datadir}
# unzip to avoid parsing errors
for i in $treedir/${datadir}/i18n/charmaps/*gz; do
gunzip $i
done
tar -cf - -C ${LOCALETREESRC}${base_libdir} -ps . | tar -xf - -C $treedir/${base_libdir}
if [ -f ${STAGING_DIR_NATIVE}${prefix_native}/lib/libgcc_s.* ]; then
tar -cf - -C ${STAGING_DIR_NATIVE}/${prefix_native}/${base_libdir} -ps libgcc_s.* | tar -xf - -C $treedir/${base_libdir}
fi
install -m 0755 ${LOCALETREESRC}${bindir}/localedef $treedir/${base_bindir}
}
do_collect_bins_from_locale_tree() {
treedir=${WORKDIR}/locale-tree
mkdir -p ${PKGD}${libdir}
tar -cf - -C $treedir/${libdir} -ps locale | tar -xf - -C ${PKGD}${libdir}
}
inherit qemu
python package_do_split_gconvs () {
import os, re
if (d.getVar('PACKAGE_NO_GCONV', True) == '1'):
bb.note("package requested not splitting gconvs")
return
if not d.getVar('PACKAGES', True):
return
bpn = d.getVar('BPN', True)
libdir = d.getVar('libdir', True)
if not libdir:
bb.error("libdir not defined")
return
datadir = d.getVar('datadir', True)
if not datadir:
bb.error("datadir not defined")
return
gconv_libdir = base_path_join(libdir, "gconv")
charmap_dir = base_path_join(datadir, "i18n", "charmaps")
locales_dir = base_path_join(datadir, "i18n", "locales")
binary_locales_dir = base_path_join(libdir, "locale")
def calc_gconv_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "r")
c_re = re.compile('^copy "(.*)"')
i_re = re.compile('^include "(\w+)".*')
for l in f.readlines():
m = c_re.match(l) or i_re.match(l)
if m:
dp = legitimize_package_name('%s-gconv-%s' % (bpn, m.group(1)))
if not dp in deps:
deps.append(dp)
f.close()
if deps != []:
d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, gconv_libdir, file_regex='^(.*)\.so$', output_pattern=bpn+'-gconv-%s', \
description='gconv module for character set %s', hook=calc_gconv_deps, \
extra_depends=bpn+'-gconv')
def calc_charmap_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "r")
c_re = re.compile('^copy "(.*)"')
i_re = re.compile('^include "(\w+)".*')
for l in f.readlines():
m = c_re.match(l) or i_re.match(l)
if m:
dp = legitimize_package_name('%s-charmap-%s' % (bpn, m.group(1)))
if not dp in deps:
deps.append(dp)
f.close()
if deps != []:
d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, charmap_dir, file_regex='^(.*)\.gz$', output_pattern=bpn+'-charmap-%s', \
description='character map for %s encoding', hook=calc_charmap_deps, extra_depends='')
def calc_locale_deps(fn, pkg, file_regex, output_pattern, group):
deps = []
f = open(fn, "r")
c_re = re.compile('^copy "(.*)"')
i_re = re.compile('^include "(\w+)".*')
for l in f.readlines():
m = c_re.match(l) or i_re.match(l)
if m:
dp = legitimize_package_name(bpn+'-localedata-%s' % m.group(1))
if not dp in deps:
deps.append(dp)
f.close()
if deps != []:
d.setVar('RDEPENDS_%s' % pkg, " ".join(deps))
if bpn != 'glibc':
d.setVar('RPROVIDES_%s' % pkg, pkg.replace(bpn, 'glibc'))
do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
d.setVar('PACKAGES', d.getVar('PACKAGES') + ' ' + d.getVar('MLPREFIX') + bpn + '-gconv')
use_bin = d.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", True)
dot_re = re.compile("(.*)\.(.*)")
# The GLIBC_GENERATE_LOCALES variable specifies which locales are to be supported; empty or "all" means all locales
if use_bin != "precompiled":
supported = d.getVar('GLIBC_GENERATE_LOCALES', True)
if not supported or supported == "all":
f = open(base_path_join(d.getVar('WORKDIR', True), "SUPPORTED"), "r")
supported = f.readlines()
f.close()
else:
supported = supported.split()
supported = map(lambda s:s.replace(".", " ") + "\n", supported)
else:
supported = []
full_bin_path = d.getVar('PKGD', True) + binary_locales_dir
for dir in os.listdir(full_bin_path):
dbase = dir.split(".")
d2 = " "
if len(dbase) > 1:
d2 = "." + dbase[1].upper() + " "
supported.append(dbase[0] + d2)
# Collate the locales by base and encoding
utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
encodings = {}
for l in supported:
l = l[:-1]
(locale, charset) = l.split(" ")
if utf8_only and charset != 'UTF-8':
continue
m = dot_re.match(locale)
if m:
locale = m.group(1)
if not encodings.has_key(locale):
encodings[locale] = []
encodings[locale].append(charset)
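# After this pass, 'encodings' maps each base locale to its charsets, e.g.
# (illustrative):
#   {"en_GB": ["UTF-8", "ISO-8859-1"], "ja_JP": ["EUC-JP"]}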
def output_locale_source(name, pkgname, locale, encoding):
d.setVar('RDEPENDS_%s' % pkgname, 'localedef %s-localedata-%s %s-charmap-%s' % \
(bpn, legitimize_package_name(locale), bpn, legitimize_package_name(encoding)))
d.setVar('pkg_postinst_%s' % pkgname, d.getVar('locale_base_postinst', True) \
% (locale, encoding, locale))
d.setVar('pkg_postrm_%s' % pkgname, d.getVar('locale_base_postrm', True) % \
(locale, encoding, locale))
def output_locale_binary_rdepends(name, pkgname, locale, encoding):
m = re.match("(.*)\.(.*)", name)
if m:
libc_name = "%s.%s" % (m.group(1), m.group(2).lower().replace("-",""))
else:
libc_name = name
d.setVar('RDEPENDS_%s' % pkgname, legitimize_package_name('%s-binary-localedata-%s' \
% (bpn, libc_name)))
commands = {}
def output_locale_binary(name, pkgname, locale, encoding):
treedir = base_path_join(d.getVar("WORKDIR", True), "locale-tree")
ldlibdir = base_path_join(treedir, d.getVar("base_libdir", True))
path = d.getVar("PATH", True)
i18npath = base_path_join(treedir, datadir, "i18n")
gconvpath = base_path_join(treedir, "iconvdata")
outputpath = base_path_join(treedir, libdir, "locale")
use_cross_localedef = d.getVar("LOCALE_GENERATION_WITH_CROSS-LOCALEDEF", True) or "0"
if use_cross_localedef == "1":
target_arch = d.getVar('TARGET_ARCH', True)
locale_arch_options = { \
"arm": " --uint32-align=4 --little-endian ", \
"powerpc": " --uint32-align=4 --big-endian ", \
"powerpc64": " --uint32-align=4 --big-endian ", \
"mips": " --uint32-align=4 --big-endian ", \
"mipsel": " --uint32-align=4 --little-endian ", \
"i586": " --uint32-align=4 --little-endian ", \
"x86_64": " --uint32-align=4 --little-endian " }
if target_arch in locale_arch_options:
localedef_opts = locale_arch_options[target_arch]
else:
bb.error("locale_arch_options not found for target_arch=" + target_arch)
raise bb.build.FuncFailed("unknown arch:" + target_arch + " for locale_arch_options")
localedef_opts += " --force --old-style --no-archive --prefix=%s \
--inputfile=%s/%s/i18n/locales/%s --charmap=%s %s/%s" \
% (treedir, treedir, datadir, locale, encoding, outputpath, name)
cmd = "PATH=\"%s\" I18NPATH=\"%s\" GCONV_PATH=\"%s\" cross-localedef %s" % \
(path, i18npath, gconvpath, localedef_opts)
else: # earlier slower qemu way
qemu = qemu_target_binary(d)
localedef_opts = "--force --old-style --no-archive --prefix=%s \
--inputfile=%s/i18n/locales/%s --charmap=%s %s" \
% (treedir, datadir, locale, encoding, name)
qemu_options = d.getVar("QEMU_OPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True)
if not qemu_options:
qemu_options = d.getVar('QEMU_OPTIONS', True)
cmd = "PSEUDO_RELOADED=YES PATH=\"%s\" I18NPATH=\"%s\" %s -L %s \
-E LD_LIBRARY_PATH=%s %s %s/bin/localedef %s" % \
(path, i18npath, qemu, treedir, ldlibdir, qemu_options, treedir, localedef_opts)
commands["%s/%s" % (outputpath, name)] = cmd
bb.note("generating locale %s (%s)" % (locale, encoding))
def output_locale(name, locale, encoding):
pkgname = d.getVar('MLPREFIX') + 'locale-base-' + legitimize_package_name(name)
d.setVar('ALLOW_EMPTY_%s' % pkgname, '1')
d.setVar('PACKAGES', '%s %s' % (pkgname, d.getVar('PACKAGES', True)))
rprovides = ' virtual-locale-%s' % legitimize_package_name(name)
m = re.match("(.*)_(.*)", name)
if m:
rprovides += ' virtual-locale-%s' % m.group(1)
d.setVar('RPROVIDES_%s' % pkgname, rprovides)
if use_bin == "compile":
output_locale_binary_rdepends(name, pkgname, locale, encoding)
output_locale_binary(name, pkgname, locale, encoding)
elif use_bin == "precompiled":
output_locale_binary_rdepends(name, pkgname, locale, encoding)
else:
output_locale_source(name, pkgname, locale, encoding)
if use_bin == "compile":
bb.note("preparing tree for binary locale generation")
bb.build.exec_func("do_prep_locale_tree", d)
# Reshuffle names so that UTF-8 is preferred over other encodings
non_utf8 = []
for l in encodings.keys():
if len(encodings[l]) == 1:
output_locale(l, l, encodings[l][0])
if encodings[l][0] != "UTF-8":
non_utf8.append(l)
else:
if "UTF-8" in encodings[l]:
output_locale(l, l, "UTF-8")
encodings[l].remove("UTF-8")
else:
non_utf8.append(l)
for e in encodings[l]:
output_locale('%s.%s' % (l, e), l, e)
if non_utf8 != [] and use_bin != "precompiled":
bb.note("the following locales are supported only in legacy encodings:")
bb.note(" " + " ".join(non_utf8))
if use_bin == "compile":
makefile = base_path_join(d.getVar("WORKDIR", True), "locale-tree", "Makefile")
m = open(makefile, "w")
m.write("all: %s\n\n" % " ".join(commands.keys()))
for cmd in commands:
m.write(cmd + ":\n")
m.write(" " + commands[cmd] + "\n\n")
m.close()
d.setVar("B", os.path.dirname(makefile))
d.setVar("EXTRA_OEMAKE", "${PARALLEL_MAKE}")
bb.note("Executing binary locale generation makefile")
bb.build.exec_func("oe_runmake", d)
bb.note("collecting binary locales from locale tree")
bb.build.exec_func("do_collect_bins_from_locale_tree", d)
do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
output_pattern=bpn+'-binary-localedata-%s', \
description='binary locale definition for %s', extra_depends='', allow_dirs=True)
elif use_bin == "precompiled":
do_split_packages(d, binary_locales_dir, file_regex='(.*)', \
output_pattern=bpn+'-binary-localedata-%s', \
description='binary locale definition for %s', extra_depends='', allow_dirs=True)
else:
bb.note("generation of binary locales disabled. this may break i18n!")
}
# We want to do this indirection so that we can safely 'return'
# from the called function even though we're prepending
python populate_packages_prepend () {
bb.build.exec_func('package_do_split_gconvs', d)
}

View File

@@ -0,0 +1,358 @@
# Populates LICENSE_DIRECTORY as set in distro config with the license files as set by
# LIC_FILES_CHKSUM.
# TODO:
# - There is a real issue revolving around license naming standards.
LICENSE_DIRECTORY ??= "${DEPLOY_DIR}/licenses"
LICSSTATEDIR = "${WORKDIR}/license-destdir/"
addtask populate_lic after do_patch before do_package
do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
# Standards are great! Everyone has their own. In an effort to standardize licensing
# names, common-licenses will use the SPDX standard license names. In order to not
# break the non-standardized license names that we find in LICENSE, we'll set
# up a bunch of VarFlags to accommodate non-SPDX license names.
#
# We should really discuss standardizing this field, but that's a longer term goal.
# For now, we can do this and it should grab the most common LICENSE naming variations.
#
# Changing GPL mapping to GPL-2 as it's not very likely to be GPL-1
# We should NEVER have a GPL/LGPL without a version!!!!
# Any mapping to MPL/LGPL/GPL should be fixed
# see: https://wiki.yoctoproject.org/wiki/License_Audit
# GPL variations
SPDXLICENSEMAP[GPL-2] = "GPL-2.0"
SPDXLICENSEMAP[GPLv2] = "GPL-2.0"
SPDXLICENSEMAP[GPLv2.0] = "GPL-2.0"
SPDXLICENSEMAP[GPL-3] = "GPL-3.0"
SPDXLICENSEMAP[GPLv3] = "GPL-3.0"
SPDXLICENSEMAP[GPLv3.0] = "GPL-3.0"
#LGPL variations
SPDXLICENSEMAP[LGPLv2] = "LGPL-2.0"
SPDXLICENSEMAP[LGPL2.1] = "LGPL-2.1"
SPDXLICENSEMAP[LGPLv2.1] = "LGPL-2.1"
SPDXLICENSEMAP[LGPLv3] = "LGPL-3.0"
#MPL variations
SPDXLICENSEMAP[MPL-1] = "MPL-1.0"
SPDXLICENSEMAP[MPLv1] = "MPL-1.0"
SPDXLICENSEMAP[MPLv1.1] = "MPL-1.1"
#MIT variations
SPDXLICENSEMAP[MIT-X] = "MIT"
SPDXLICENSEMAP[MIT-style] = "MIT"
#Openssl variations
SPDXLICENSEMAP[openssl] = "OpenSSL"
#Python variations
SPDXLICENSEMAP[PSF] = "Python-2.0"
SPDXLICENSEMAP[PSFv2] = "Python-2.0"
SPDXLICENSEMAP[Python-2] = "Python-2.0"
#Apache variations
SPDXLICENSEMAP[Apachev2] = "Apache-2.0"
SPDXLICENSEMAP[Apache-2] = "Apache-2.0"
#Artistic variations
SPDXLICENSEMAP[Artisticv1] = "Artistic-1.0"
SPDXLICENSEMAP[Artistic-1] = "Artistic-1.0"
#Academic variations
SPDXLICENSEMAP[AFL-2] = "AFL-2.0"
SPDXLICENSEMAP[AFL-1] = "AFL-1.2"
SPDXLICENSEMAP[AFLv2] = "AFL-2.0"
SPDXLICENSEMAP[AFLv1] = "AFL-1.2"
#Other variations
SPDXLICENSEMAP[EPLv1.0] = "EPL-1.0"
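# The mappings are plain varflags, so the lookup performed later in
# do_populate_lic amounts to (illustrative):
#   spdx = d.getVarFlag('SPDXLICENSEMAP', 'GPLv2')   # -> "GPL-2.0"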
license_create_manifest() {
mkdir -p ${LICENSE_DIRECTORY}/${IMAGE_NAME}
# Get list of installed packages
list_installed_packages | grep -v "locale" |sort > ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest
INSTALLED_PKGS=`cat ${LICENSE_DIRECTORY}/${IMAGE_NAME}/package.manifest`
# list of installed packages is broken for deb
for pkg in ${INSTALLED_PKGS}; do
# not the best way to do this but licenses are not arch-dependent iirc
files=`find ${TMPDIR}/pkgdata/*/runtime -name ${pkg}| head -1`
for filename in $files; do
pkged_pn="$(sed -n 's/^PN: //p' ${filename})"
pkged_lic="$(sed -n '/^LICENSE: /{ s/^LICENSE: //; s/[+|&()*]/ /g; s/ */ /g; p }' ${filename})"
# check to see if the package name exists in the manifest. if so, bail.
if ! grep -q "PACKAGE NAME: ${pkg}" ${filename}; then
# exclude local recipes
if [ ! "${pkged_pn}" = "*locale*" ]; then
echo "PACKAGE NAME:" ${pkg} >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
echo "RECIPE NAME:" ${pkged_pn} >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
echo "LICENSE: " >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
for lic in ${pkged_lic}; do
if [ -e "${LICENSE_DIRECTORY}/${pkged_pn}/generic_${lic}" ]; then
echo ${lic}|sed s'/generic_//'g >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
else
echo "WARNING: The license listed, " ${lic} " was not in the licenses collected for " ${pkged_pn}>> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
fi
done
echo "" >> ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest
fi
fi
done
done
# Two options here:
# - Just copy the manifest
# - Copy the manifest and the license directories
# This will make your image a bit larger, however
# if you are concerned about license compliance
# and delivery this should cover all your bases
if [ -n "${COPY_LIC_MANIFEST}" ]; then
mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/
cp ${LICENSE_DIRECTORY}/${IMAGE_NAME}/license.manifest ${IMAGE_ROOTFS}/usr/share/common-licenses/license.manifest
if [ -n "${COPY_LIC_DIRS}" ]; then
for pkg in ${INSTALLED_PKGS}; do
mkdir -p ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}
for lic in `ls ${LICENSE_DIRECTORY}/${pkged_pn}`; do
# Really don't need to copy the generics as they're
# represented in the manifest and in the actual pkg licenses
# Doing so would make your image quite a bit larger
if [ ! ${lic} = "generic_*" ]; then
cp ${LICENSE_DIRECTORY}/${pkged_pn}/${lic} ${IMAGE_ROOTFS}/usr/share/common-licenses/${pkg}/${lic}
fi
done
done
fi
fi
}
python do_populate_lic() {
"""
Populate LICENSE_DIRECTORY with licenses.
"""
import os
import bb
import shutil
import oe.license
# All the license types for the package
license_types = d.getVar('LICENSE', True)
# All the license files for the package
lic_files = d.getVar('LIC_FILES_CHKSUM', True)
pn = d.getVar('PN', True)
# The base directory we wrangle licenses to
destdir = os.path.join(d.getVar('LICSSTATEDIR', True), pn)
# The license files are located in ${S}, as referenced by LIC_FILES_CHKSUM.
srcdir = d.getVar('S', True)
# Directory we store the generic licenses as set in the distro configuration
generic_directory = d.getVar('COMMON_LICENSE_DIR', True)
license_source_dirs = []
license_source_dirs.append(generic_directory)
try:
additional_lic_dirs = d.getVar('LICENSE_DIR', True).split()
for lic_dir in additional_lic_dirs:
license_source_dirs.append(lic_dir)
except:
pass
class FindVisitor(oe.license.LicenseVisitor):
def visit_Str(self, node):
#
# Until we figure out what to do with the two
# modifiers supported ("or later" = "+" and
# "with exceptions" = "*"), we just strip out
# the modifier and use the base license.
find_license(node.s.replace("+", "").replace("*", ""))
self.generic_visit(node)
def find_license(license_type):
try:
bb.mkdirhier(gen_lic_dest)
except:
pass
spdx_generic = None
license_source = None
# If the generic does not exist we need to check to see if there is an SPDX mapping to it
for lic_dir in license_source_dirs:
if not os.path.isfile(os.path.join(lic_dir, license_type)):
if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
# Great, there is an SPDXLICENSEMAP. We can copy!
bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
license_source = lic_dir
break
elif os.path.isfile(os.path.join(lic_dir, license_type)):
spdx_generic = license_type
license_source = lic_dir
break
if spdx_generic and license_source:
# we really should copy to generic_ + spdx_generic, however, that ends up messing the manifest
# audit up. This should be fixed in emit_pkgdata (or we actually go and fix all the recipes)
ret = bb.copyfile(os.path.join(license_source, spdx_generic), os.path.join(os.path.join(d.getVar('LICSSTATEDIR', True), pn), "generic_" + license_type))
# If the copy didn't occur, something horrible went wrong and we fail out
if not ret:
bb.warn("%s for %s could not be copied for some reason. It may not exist. WARN for now." % (spdx_generic, pn))
else:
# And here is where we warn people that their licenses are lousy
bb.warn("%s: No generic license file exists for: %s in any provider" % (pn, license_type))
pass
try:
bb.mkdirhier(destdir)
except:
pass
if not generic_directory:
raise bb.build.FuncFailed("COMMON_LICENSE_DIR is unset. Please set this in your distro config")
if not lic_files:
# No recipe should have an invalid license file. This is checked elsewhere,
# but let's be pedantic
bb.note(pn + ": Recipe file does not have license file information.")
return True
for url in lic_files.split():
(type, host, path, user, pswd, parm) = bb.decodeurl(url)
# We want the license file to be copied into the destination
srclicfile = os.path.join(srcdir, path)
ret = bb.copyfile(srclicfile, os.path.join(destdir, os.path.basename(path)))
# If the copy didn't occur, something horrible went wrong and we fail out
if not ret:
bb.warn("%s could not be copied for some reason. It may not exist. WARN for now." % srclicfile)
v = FindVisitor()
try:
v.visit_string(license_types)
except oe.license.InvalidLicense as exc:
bb.fatal('%s: %s' % (d.getVar('PF', True), exc))
except SyntaxError:
bb.warn("%s: Failed to parse it's LICENSE field." % (d.getVar('PF', True)))
}
def incompatible_license(d,dont_want_license):
"""
This function checks if a package has only incompatible licenses. It also takes
the 'or' operator into consideration.
"""
import re
import oe.license
from fnmatch import fnmatchcase as fnmatch
dont_want_licenses = []
dont_want_licenses.append(d.getVar('INCOMPATIBLE_LICENSE', 1))
if d.getVarFlag('SPDXLICENSEMAP', dont_want_license):
dont_want_licenses.append(d.getVarFlag('SPDXLICENSEMAP', dont_want_license))
def include_license(license):
if any(fnmatch(license, pattern) for pattern in dont_want_licenses):
return False
else:
spdx_license = d.getVarFlag('SPDXLICENSEMAP', license)
if spdx_license and any(fnmatch(spdx_license, pattern) for pattern in dont_want_licenses):
return False
else:
return True
def choose_licenses(a, b):
if all(include_license(lic) for lic in a):
return a
else:
return b
"""
If we want to exclude a license named generically 'X', we surely want to exclude 'X+' as well.
Consequently, we strip the '+' character from LICENSE when INCOMPATIBLE_LICENSE
is not itself an 'X+' license.
"""
if not re.search(r'[+]',dont_want_license):
licenses=oe.license.flattened_licenses(re.sub(r'[+]', '', d.getVar('LICENSE', True)), choose_licenses)
else:
licenses=oe.license.flattened_licenses(d.getVar('LICENSE', True), choose_licenses)
for onelicense in licenses:
if not include_license(onelicense):
return True
return False
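# Example (illustrative): with INCOMPATIBLE_LICENSE = "GPLv3" and a recipe
# LICENSE = "GPLv2 | GPLv3", choose_licenses() keeps the all-compatible GPLv2
# branch, so incompatible_license() returns False; for a recipe whose LICENSE
# is "GPLv3" alone it returns True and the recipe can be skipped.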
def check_license_flags(d):
"""
This function checks if a recipe has any LICENSE_FLAGs that
aren't whitelisted.
If it does, it returns the first LICENSE_FLAG missing from the
whitelist, or all the LICENSE_FLAGs if there is no whitelist.
If everything is properly whitelisted, it returns None.
"""
def license_flag_matches(flag, whitelist, pn):
"""
Return True if flag matches something in whitelist, None if not.
Before we test a flag against the whitelist, we append _${PN}
to it. We then try to match that string against the
whitelist. This covers the normal case, where we expect
LICENSE_FLAGS to be a simple string like 'commercial', which
the user typically matches exactly in the whitelist by
explicitly appending the package name e.g. 'commercial_foo'.
If we fail the match however, we then split the flag across
'_' and append each fragment and test until we either match or
run out of fragments.
"""
flag_pn = ("%s_%s" % (flag, pn))
for candidate in whitelist:
if flag_pn == candidate:
return True
flag_cur = ""
flagments = flag_pn.split("_")
flagments.pop() # we've already tested the full string
for flagment in flagments:
if flag_cur:
flag_cur += "_"
flag_cur += flagment
for candidate in whitelist:
if flag_cur == candidate:
return True
return False
def all_license_flags_match(license_flags, whitelist):
""" Return first unmatched flag, None if all flags match """
pn = d.getVar('PN', True)
split_whitelist = whitelist.split()
for flag in license_flags.split():
if not license_flag_matches(flag, split_whitelist, pn):
return flag
return None
license_flags = d.getVar('LICENSE_FLAGS', True)
if license_flags:
whitelist = d.getVar('LICENSE_FLAGS_WHITELIST', True)
if not whitelist:
return license_flags
unmatched_flag = all_license_flags_match(license_flags, whitelist)
if unmatched_flag:
return unmatched_flag
return None
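# A minimal standalone sketch (illustrative, hypothetical values) of the
# fragment walk performed by license_flag_matches() above:
def _example_license_flag_candidates(flag, pn):
    # Candidates are tried in this order against the whitelist.
    flag_pn = "%s_%s" % (flag, pn)
    candidates = [flag_pn]
    cur = ""
    for fragment in flag_pn.split("_")[:-1]:
        cur = cur + "_" + fragment if cur else fragment
        candidates.append(cur)
    return candidates
# _example_license_flag_candidates("commercial_qt", "foo") returns
# ['commercial_qt_foo', 'commercial', 'commercial_qt'], so whitelisting either
# "commercial" or "commercial_qt" matches this flag.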
SSTATETASKS += "do_populate_lic"
do_populate_lic[sstate-name] = "populate-lic"
do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
ROOTFS_POSTINSTALL_COMMAND += "license_create_manifest; "
python do_populate_lic_setscene () {
sstate_setscene(d)
}
addtask do_populate_lic_setscene

View File

@@ -0,0 +1,32 @@
# parse kernel ABI version out of <linux/version.h>
def get_kernelversion(p):
import re
fn = p + '/include/linux/utsrelease.h'
if not os.path.isfile(fn):
# after 2.6.33-rc1
fn = p + '/include/generated/utsrelease.h'
if not os.path.isfile(fn):
fn = p + '/include/linux/version.h'
try:
f = open(fn, 'r')
except IOError:
return None
l = f.readlines()
f.close()
r = re.compile("#define UTS_RELEASE \"(.*)\"")
for s in l:
m = r.match(s)
if m:
return m.group(1)
return None
def linux_module_packages(s, d):
suffix = ""
return " ".join(map(lambda s: "kernel-module-%s%s" % (s.lower().replace('_', '-').replace('@', '+'), suffix), s.split()))
# that's all

View File

@@ -0,0 +1,72 @@
# The following logging mechanisms are to be used in bash functions of recipes.
# They are intended to map one to one in intention and output format with the
# python recipe logging functions of a similar naming convention: bb.plain(),
# bb.note(), etc.
#
# For the time being, all of these print only to the task logs. Future
# enhancements may integrate these calls with the bitbake logging
# infrastructure, allowing for printing to the console as appropriate. The
# interface and intention statements reflect that future goal. Once it is
# in place, no changes will be necessary to recipes using these logging
# mechanisms.
# Print the output exactly as it is passed in. Typically used for output of
# tasks that should be seen on the console. Use sparingly.
# Output: logs console
# NOTE: console output is not currently implemented.
bbplain() {
echo "$*"
}
# Notify the user of a noteworthy condition.
# Output: logs console
# NOTE: console output is not currently implemented.
bbnote() {
echo "NOTE: $*"
}
# Print a warning to the log. Warnings are non-fatal, and do not
# indicate a build failure.
# Output: logs
bbwarn() {
echo "WARNING: $*"
}
# Print an error to the log. Errors are non-fatal in that the build can
# continue, but they do indicate a build failure.
# Output: logs
bberror() {
echo "ERROR: $*"
}
# Print a fatal error to the log. Fatal errors indicate build failure
# and halt the build, exiting with an error code.
# Output: logs
bbfatal() {
echo "ERROR: $*"
exit 1
}
# Print debug messages. These are appropriate for progress checkpoint
# messages to the logs. Depending on the debug log level, they may also
# go to the console.
# Output: logs console
# Usage: bbdebug 1 "first level debug message"
# bbdebug 2 "second level debug message"
# NOTE: console output is not currently implemented.
bbdebug() {
USAGE='Usage: bbdebug [123] "message"'
if [ $# -lt 2 ]; then
bbfatal "$USAGE"
fi
# Strip off the debug level and ensure it is an integer
DBGLVL=$1; shift
if ! [[ "$DBGLVL" =~ ^[0-9]+ ]]; then
bbfatal "$USAGE"
fi
# All debug output is printed to the logs
echo "DEBUG: $*"
}
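# Example usage from a recipe's shell task (illustrative):
# do_install_append() {
#     bbnote "installing extra configuration"
#     install -d ${D}${sysconfdir} || bbfatal "could not create ${sysconfdir}"
#     bbdebug 1 "configuration installed"
# }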

View File

@@ -0,0 +1,4 @@
PACKAGES = ""
do_build[recrdeptask] = "do_build"

View File

@@ -0,0 +1,77 @@
METADATA_BRANCH ?= "${@base_detect_branch(d)}"
METADATA_REVISION ?= "${@base_detect_revision(d)}"
def base_detect_revision(d):
path = base_get_scmbasepath(d)
scms = [base_get_metadata_git_revision, \
base_get_metadata_svn_revision]
for scm in scms:
rev = scm(path, d)
if rev <> "<unknown>":
return rev
return "<unknown>"
def base_detect_branch(d):
path = base_get_scmbasepath(d)
scms = [base_get_metadata_git_branch]
for scm in scms:
rev = scm(path, d)
if rev <> "<unknown>":
return rev.strip()
return "<unknown>"
def base_get_scmbasepath(d):
return d.getVar( 'COREBASE', 1 )
def base_get_metadata_monotone_branch(path, d):
monotone_branch = "<unknown>"
try:
monotone_branch = file( "%s/_MTN/options" % path ).read().strip()
if monotone_branch.startswith( "database" ):
monotone_branch_words = monotone_branch.split()
monotone_branch = monotone_branch_words[ monotone_branch_words.index( "branch" )+1][1:-1]
except:
pass
return monotone_branch
def base_get_metadata_monotone_revision(path, d):
monotone_revision = "<unknown>"
try:
monotone_revision = file( "%s/_MTN/revision" % path ).read().strip()
if monotone_revision.startswith( "format_version" ):
monotone_revision_words = monotone_revision.split()
monotone_revision = monotone_revision_words[ monotone_revision_words.index( "old_revision" )+1][1:-1]
except IOError:
pass
return monotone_revision
def base_get_metadata_svn_revision(path, d):
revision = "<unknown>"
try:
revision = file( "%s/.svn/entries" % path ).readlines()[3].strip()
except IOError:
pass
return revision
def base_get_metadata_git_branch(path, d):
branch = os.popen('cd %s; git branch 2>&1 | grep "^* " | tr -d "* "' % path).read()
if len(branch) != 0:
return branch
return "<unknown>"
def base_get_metadata_git_revision(path, d):
f = os.popen("cd %s; git log -n 1 --pretty=oneline -- 2>&1" % path)
data = f.read()
if f.close() is None:
rev = data.split(" ")[0]
if len(rev) != 0:
return rev
return "<unknown>"

View File

@@ -0,0 +1,60 @@
DEPENDS += "shared-mime-info-native shared-mime-info"
EXTRA_OECONF += "--disable-update-mimedb"
mime_postinst() {
if [ "$1" = configure ]; then
UPDATEMIMEDB=`which update-mime-database`
if [ -x "$UPDATEMIMEDB" ] ; then
echo "Updating MIME database... this may take a while."
$UPDATEMIMEDB $D${datadir}/mime
else
echo "Missing update-mime-database, update of mime database failed!"
exit 1
fi
fi
}
mime_postrm() {
if [ "$1" = remove ] || [ "$1" = upgrade ]; then
UPDATEMIMEDB=`which update-mime-database`
if [ -x "$UPDATEMIMEDB" ] ; then
echo "Updating MIME database... this may take a while."
$UPDATEMIMEDB $D${datadir}/mime
else
echo "Missing update-mime-database, update of mime database failed!"
exit 1
fi
fi
}
python populate_packages_append () {
import re
packages = d.getVar('PACKAGES', True).split()
pkgdest = d.getVar('PKGDEST', True)
for pkg in packages:
mime_dir = '%s/%s/usr/share/mime/packages' % (pkgdest, pkg)
mimes = []
mime_re = re.compile(".*\.xml$")
if os.path.exists(mime_dir):
for f in os.listdir(mime_dir):
if mime_re.match(f):
mimes.append(f)
if mimes:
bb.note("adding mime postinst and postrm scripts to %s" % pkg)
postinst = d.getVar('pkg_postinst_%s' % pkg, True) or d.getVar('pkg_postinst', True)
if not postinst:
postinst = '#!/bin/sh\n'
postinst += d.getVar('mime_postinst', True)
d.setVar('pkg_postinst_%s' % pkg, postinst)
postrm = d.getVar('pkg_postrm_%s' % pkg, True) or d.getVar('pkg_postrm', True)
if not postrm:
postrm = '#!/bin/sh\n'
postrm += d.getVar('mime_postrm', True)
d.setVar('pkg_postrm_%s' % pkg, postrm)
bb.note("adding shared-mime-info-data dependency to %s" % pkg)
rdepends = explode_deps(d.getVar('RDEPENDS_' + pkg, False) or d.getVar('RDEPENDS', False)) or ""
rdepends.append("shared-mime-info-data")
d.setVar('RDEPENDS_' + pkg, " " + " ".join(rdepends))
}
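# Example (illustrative): a package shipping
# /usr/share/mime/packages/foo.xml gets mime_postinst/mime_postrm appended to
# its maintainer scripts and an RDEPENDS on shared-mime-info-data, so
# update-mime-database runs on the target at install and removal time.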

View File

@@ -0,0 +1,56 @@
MIRRORS += "\
${DEBIAN_MIRROR}/main http://snapshot.debian.net/archive/pool \n \
${DEBIAN_MIRROR} ftp://ftp.de.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.au.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.cl.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.hr.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.fi.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.hk.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.hu.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.ie.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.it.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.jp.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.no.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.pl.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.ro.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.si.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.es.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.se.debian.org/debian/pool \n \
${DEBIAN_MIRROR} ftp://ftp.tr.debian.org/debian/pool \n \
${GNU_MIRROR} ftp://mirrors.kernel.org/gnu \n \
${GNU_MIRROR} ftp://ftp.matrix.com.br/pub/gnu \n \
${GNU_MIRROR} ftp://ftp.cs.ubc.ca/mirror2/gnu \n \
${GNU_MIRROR} ftp://sunsite.ust.hk/pub/gnu \n \
${GNU_MIRROR} ftp://ftp.ayamura.org/pub/gnu \n \
${KERNELORG_MIRROR} http://www.kernel.org/pub \n \
${KERNELORG_MIRROR} ftp://ftp.us.kernel.org/pub \n \
${KERNELORG_MIRROR} ftp://ftp.uk.kernel.org/pub \n \
${KERNELORG_MIRROR} ftp://ftp.hk.kernel.org/pub \n \
${KERNELORG_MIRROR} ftp://ftp.au.kernel.org/pub \n \
${KERNELORG_MIRROR} ftp://ftp.jp.kernel.org/pub \n \
ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.franken.de/pub/crypt/mirror/ftp.gnupg.org/gcrypt/ \n \
ftp://ftp.gnupg.org/gcrypt/ ftp://ftp.surfnet.nl/pub/security/gnupg/ \n \
ftp://ftp.gnupg.org/gcrypt/ http://gulus.USherbrooke.ca/pub/appl/GnuPG/ \n \
ftp://dante.ctan.org/tex-archive ftp://ftp.fu-berlin.de/tex/CTAN \n \
ftp://dante.ctan.org/tex-archive http://sunsite.sut.ac.jp/pub/archives/ctan/ \n \
ftp://dante.ctan.org/tex-archive http://ctan.unsw.edu.au/ \n \
ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnutls.org/pub/gnutls/ \n \
ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.gnupg.org/gcrypt/gnutls/ \n \
ftp://ftp.gnutls.org/pub/gnutls http://www.mirrors.wiretapped.net/security/network-security/gnutls/ \n \
ftp://ftp.gnutls.org/pub/gnutls ftp://ftp.mirrors.wiretapped.net/pub/security/network-security/gnutls/ \n \
ftp://ftp.gnutls.org/pub/gnutls http://josefsson.org/gnutls/releases/ \n \
http://ftp.info-zip.org/pub/infozip/src/ http://mirror.switch.ch/ftp/mirror/infozip/src/ \n \
http://ftp.info-zip.org/pub/infozip/src/ ftp://sunsite.icm.edu.pl/pub/unix/archiving/info-zip/src/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cerias.purdue.edu/pub/tools/unix/sysutils/lsof/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tau.ac.il/pub/unix/admin/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.cert.dfn.de/pub/tools/admin/lsof/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.fu-berlin.de/pub/unix/tools/lsof/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.kaizo.org/pub/lsof/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tu-darmstadt.de/pub/sysadmin/lsof/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://ftp.tux.org/pub/sites/vic.cc.purdue.edu/tools/unix/lsof/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/ ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
http://www.apache.org/dist http://archive.apache.org/dist \n \
"

View File

@@ -0,0 +1,28 @@
inherit module_strip
inherit kernel-arch
export OS = "${TARGET_OS}"
export CROSS_COMPILE = "${TARGET_PREFIX}"
export KERNEL_VERSION = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-abiversion')}"
KERNEL_OBJECT_SUFFIX = ".ko"
KERNEL_CCSUFFIX = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-ccsuffix')}"
KERNEL_LDSUFFIX = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-ldsuffix')}"
KERNEL_ARSUFFIX = "${@base_read_file('${STAGING_KERNEL_DIR}/kernel-arsuffix')}"
# Set TARGET_??_KERNEL_ARCH in the machine .conf to set architecture
# specific options necessary for building the kernel and modules.
TARGET_CC_KERNEL_ARCH ?= ""
HOST_CC_KERNEL_ARCH ?= "${TARGET_CC_KERNEL_ARCH}"
TARGET_LD_KERNEL_ARCH ?= ""
HOST_LD_KERNEL_ARCH ?= "${TARGET_LD_KERNEL_ARCH}"
TARGET_AR_KERNEL_ARCH ?= ""
HOST_AR_KERNEL_ARCH ?= "${TARGET_AR_KERNEL_ARCH}"
KERNEL_CC = "${CCACHE}${HOST_PREFIX}gcc${KERNEL_CCSUFFIX} ${HOST_CC_KERNEL_ARCH}"
KERNEL_LD = "${HOST_PREFIX}ld${KERNEL_LDSUFFIX} ${HOST_LD_KERNEL_ARCH}"
KERNEL_AR = "${HOST_PREFIX}ar${KERNEL_ARSUFFIX} ${HOST_AR_KERNEL_ARCH}"
# kernel modules are generally machine specific
PACKAGE_ARCH = "${MACHINE_ARCH}"

View File

@@ -0,0 +1,53 @@
RDEPENDS += "kernel-image update-modules"
DEPENDS += "virtual/kernel"
inherit module-base
#
# Ensure the hostprogs are available for module compilation. Modules that
# inherit this recipe and override do_compile() should be sure to call
# do_make_scripts() or ensure the scripts are built independently.
#
do_make_scripts() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
-C ${STAGING_KERNEL_DIR} scripts
}
addtask make_scripts before do_compile
do_make_scripts[lockfiles] = "${TMPDIR}/kernel-scripts.lock"
do_make_scripts[deptask] = "do_populate_sysroot"
module_do_compile() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR} \
KERNEL_SRC=${STAGING_KERNEL_DIR} \
KERNEL_VERSION=${KERNEL_VERSION} \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
AR="${KERNEL_AR}" \
${MAKE_TARGETS}
}
module_do_install() {
unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
KERNEL_SRC=${STAGING_KERNEL_DIR} \
CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
modules_install
}
pkg_postinst_append () {
if [ -n "$D" ]; then
exit 1
fi
depmod -a
update-modules || true
}
pkg_postrm_append () {
update-modules || true
}
EXPORT_FUNCTIONS do_compile do_install
FILES_${PN} = "/etc /lib/modules"
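# Example out-of-tree module recipe (illustrative, hypothetical names):
# DESCRIPTION = "Example kernel module"
# SRC_URI = "file://hello-mod.tar.gz"
# S = "${WORKDIR}/hello-mod"
# inherit module
# The class then compiles against ${STAGING_KERNEL_DIR} with the kernel's
# CC/LD/AR and installs via modules_install into ${D}.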

View File

@@ -0,0 +1,64 @@
python multilib_virtclass_handler () {
if not isinstance(e, bb.event.RecipePreFinalise):
return
cls = e.data.getVar("BBEXTENDCURR", True)
variant = e.data.getVar("BBEXTENDVARIANT", True)
if cls != "multilib" or not variant:
return
# There should only be one kernel in multilib configs
if bb.data.inherits_class('kernel', e.data) or bb.data.inherits_class('module-base', e.data):
raise bb.parse.SkipPackage("We shouldn't have multilib variants for the kernel")
if bb.data.inherits_class('image', e.data):
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
return
save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME", True) or ""
for name in save_var_name.split():
val=e.data.getVar(name, True)
if val:
e.data.setVar(name + "_MULTILIB_ORIGINAL", val)
override = ":virtclass-multilib-" + variant
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar("SHLIBSDIR_virtclass-multilib-" + variant ,e.data.getVar("SHLIBSDIR", False) + "/" + variant)
e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + variant, e.data.getVar("TARGET_VENDOR", False) + "ml" + variant)
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
}
addhandler multilib_virtclass_handler
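# Example (illustrative): with BBEXTENDVARIANT = "lib32", a recipe "zlib"
# becomes "lib32-zlib", MLPREFIX is set to "lib32-", and
# ":virtclass-multilib-lib32" is appended to OVERRIDES.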
STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
python __anonymous () {
variant = d.getVar("BBEXTENDVARIANT", True)
import oe.classextend
clsextend = oe.classextend.ClassExtender(variant, d)
if bb.data.inherits_class('image', d):
clsextend.map_depends_variable("PACKAGE_INSTALL")
clsextend.map_depends_variable("LINGUAS_INSTALL")
clsextend.map_depends_variable("RDEPENDS")
pinstall = d.getVar("LINGUAS_INSTALL", True) + " " + d.getVar("PACKAGE_INSTALL", True)
d.setVar("PACKAGE_INSTALL", pinstall)
d.setVar("LINGUAS_INSTALL", "")
# FIXME, we need to map this to something, not delete it!
d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", "")
return
clsextend.rename_packages()
clsextend.rename_package_variables((d.getVar("PACKAGEVARS", True) or "").split())
clsextend.map_depends_variable("DEPENDS")
clsextend.map_packagevars()
clsextend.map_variable("PROVIDES")
clsextend.map_variable("PACKAGES_DYNAMIC")
clsextend.map_variable("PACKAGE_INSTALL")
clsextend.map_variable("INITSCRIPT_PACKAGES")
}

View File

@@ -0,0 +1,38 @@
python multilib_virtclass_handler_global () {
if not e.data:
return
variant = e.data.getVar("BBEXTENDVARIANT", True)
if isinstance(e, bb.event.RecipeParsed) and not variant:
if bb.data.inherits_class('kernel', e.data) or bb.data.inherits_class('module-base', e.data):
variants = (e.data.getVar("MULTILIB_VARIANTS", True) or "").split()
import oe.classextend
clsextends = []
for variant in variants:
clsextends.append(oe.classextend.ClassExtender(variant, e.data))
# Process PROVIDES
origprovs = provs = e.data.getVar("PROVIDES", True) or ""
for clsextend in clsextends:
provs = provs + " " + clsextend.map_variable("PROVIDES", setvar=False)
e.data.setVar("PROVIDES", provs)
# Process RPROVIDES
origrprovs = rprovs = e.data.getVar("RPROVIDES", True) or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES", setvar=False)
e.data.setVar("RPROVIDES", rprovs)
# Process RPROVIDES_${PN}...
for pkg in (e.data.getVar("PACKAGES", True) or "").split():
origrprovs = rprovs = e.data.getVar("RPROVIDES_%s" % pkg, True) or ""
for clsextend in clsextends:
rprovs = rprovs + " " + clsextend.map_variable("RPROVIDES_%s" % pkg, setvar=False)
rprovs = rprovs + " " + clsextend.extname + "-" + pkg
e.data.setVar("RPROVIDES_%s" % pkg, rprovs)
}
addhandler multilib_virtclass_handler_global
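# Example (illustrative): for a kernel recipe with MULTILIB_VARIANTS =
# "lib32", each package such as kernel-image additionally gains
# "lib32-kernel-image" in its RPROVIDES, so multilib image dependencies can
# be satisfied by the single kernel build.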

View File

@@ -0,0 +1,29 @@
inherit siteinfo
# If applicable on the architecture, this routine will rename the header and add
# a unique identifier to the name for the ABI/bitsize that is being used. A wrapper will
# be generated for the architecture that knows how to call all of the ABI variants for that
# given architecture.
#
# TODO: mips64 n32 is not yet recognized in this code;
# when that is identified, the name of the wrapped item should be "n32" and
# appropriately determined in the if logic below...
#
oe_multilib_header() {
# Do nothing on ARM, only one ABI is supported at once
if echo ${TARGET_ARCH} | grep -q arm; then
return
fi
for each_header in "$@" ; do
if [ ! -f "${D}/${includedir}/$each_header" ]; then
bberror "oe_multilib_header: Unable to find header $each_header."
continue
fi
stem=$(echo $each_header | sed 's#\.h$##')
ident=${SITEINFO_BITS}
# if mips64/n32 set ident to n32
mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
done
}
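# Example usage from a recipe's do_install (illustrative):
# oe_multilib_header asm/types.h
# On a 32-bit target this renames the installed header to asm/types-32.h and
# generates a wrapper asm/types.h (from multilib_header_wrapper.h) that
# selects the right variant per ABI.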

View File

@@ -0,0 +1,156 @@
# We want native packages to be relocatable
inherit relocatable
# Native packages are built indirectly via dependency,
# no need for them to be a direct target of 'world'
EXCLUDE_FROM_WORLD = "1"
PACKAGES = ""
PACKAGES_virtclass-native = ""
PACKAGES_DYNAMIC = ""
PACKAGES_DYNAMIC_virtclass-native = ""
PACKAGE_ARCH = "${BUILD_ARCH}"
# used by cmake class
OECMAKE_RPATH = "${libdir}"
OECMAKE_RPATH_virtclass-native = "${libdir}"
# When this class has packaging enabled, setting
# RPROVIDES becomes unnecessary.
RPROVIDES = "${PN}"
TARGET_ARCH = "${BUILD_ARCH}"
TARGET_OS = "${BUILD_OS}"
TARGET_VENDOR = "${BUILD_VENDOR}"
TARGET_PREFIX = "${BUILD_PREFIX}"
TARGET_CC_ARCH = "${BUILD_CC_ARCH}"
TARGET_LD_ARCH = "${BUILD_LD_ARCH}"
TARGET_AS_ARCH = "${BUILD_AS_ARCH}"
TARGET_FPU = ""
HOST_ARCH = "${BUILD_ARCH}"
HOST_OS = "${BUILD_OS}"
HOST_VENDOR = "${BUILD_VENDOR}"
HOST_PREFIX = "${BUILD_PREFIX}"
HOST_CC_ARCH = "${BUILD_CC_ARCH}"
HOST_LD_ARCH = "${BUILD_LD_ARCH}"
HOST_AS_ARCH = "${BUILD_AS_ARCH}"
CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
CXXFLAGS = "${BUILD_CFLAGS}"
LDFLAGS = "${BUILD_LDFLAGS}"
LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
DEPENDS_GETTEXT = "gettext-native"
# Don't use site files for native builds
export CONFIG_SITE = ""
# set the compiler as well. It could have been set to something else
export CC = "${CCACHE}${HOST_PREFIX}gcc ${HOST_CC_ARCH}"
export CXX = "${CCACHE}${HOST_PREFIX}g++ ${HOST_CC_ARCH}"
export F77 = "${CCACHE}${HOST_PREFIX}g77 ${HOST_CC_ARCH}"
export CPP = "${HOST_PREFIX}gcc ${HOST_CC_ARCH} -E"
export LD = "${HOST_PREFIX}ld ${HOST_LD_ARCH} "
export CCLD = "${CC}"
export AR = "${HOST_PREFIX}ar"
export AS = "${HOST_PREFIX}as ${HOST_AS_ARCH}"
export RANLIB = "${HOST_PREFIX}ranlib"
export STRIP = "${HOST_PREFIX}strip"
# Path prefixes
base_prefix = "${STAGING_DIR_NATIVE}"
prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
exec_prefix = "${STAGING_DIR_NATIVE}${prefix_native}"
libdir = "${STAGING_DIR_NATIVE}${libdir_native}"
baselib = "lib"
# Libtool's default paths are correct for the native machine
lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
NATIVE_PACKAGE_PATH_SUFFIX = ""
bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE}"
do_populate_sysroot[sstate-outputdirs] = "${STAGING_DIR_NATIVE}"
# Since we actually install these in situ there is no staging prefix
STAGING_DIR_HOST = ""
STAGING_DIR_TARGET = ""
SHLIBSDIR = "${STAGING_DIR_NATIVE}/shlibs"
PKG_CONFIG_DIR = "${libdir}/pkgconfig"
EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
PKG_CONFIG_SYSROOT_DIR = ""
# we don't want libc-uclibc or libc-glibc to kick in for native recipes
LIBCOVERRIDE = ""
PATH =. "${COREBASE}/scripts/native-intercept:"
python native_virtclass_handler () {
if not isinstance(e, bb.event.RecipePreFinalise):
return
classextend = e.data.getVar('BBCLASSEXTEND', True) or ""
if "native" not in classextend:
return
pn = e.data.getVar("PN", True)
if not pn.endswith("-native"):
return
def map_dependencies(varname, d, suffix = ""):
if suffix:
varname = varname + "_" + suffix
deps = d.getVar(varname, True)
if not deps:
return
deps = bb.utils.explode_deps(deps)
newdeps = []
for dep in deps:
if dep.endswith("-cross"):
newdeps.append(dep.replace("-cross", "-native"))
elif not dep.endswith("-native"):
newdeps.append(dep + "-native")
else:
newdeps.append(dep)
d.setVar(varname, " ".join(newdeps))
map_dependencies("DEPENDS", e.data)
for pkg in [e.data.getVar("PN", True), "", "${PN}"]:
map_dependencies("RDEPENDS", e.data, pkg)
map_dependencies("RRECOMMENDS", e.data, pkg)
map_dependencies("RSUGGESTS", e.data, pkg)
map_dependencies("RPROVIDES", e.data, pkg)
map_dependencies("RREPLACES", e.data, pkg)
provides = e.data.getVar("PROVIDES", True)
for prov in provides.split():
if prov.find(pn) != -1:
continue
if not prov.endswith("-native"):
provides = provides.replace(prov, prov + "-native")
e.data.setVar("PROVIDES", provides)
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
}
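# Example (illustrative): for a recipe with BBCLASSEXTEND = "native", a
# DEPENDS of "zlib gettext-cross openssl-native" is rewritten for the -native
# variant to "zlib-native gettext-native openssl-native".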
addhandler native_virtclass_handler
do_package[noexec] = "1"
do_package_write_ipk[noexec] = "1"
do_package_write_deb[noexec] = "1"
do_package_write_rpm[noexec] = "1"
do_populate_sysroot[stamp-extra-info] = ""
do_package[stamp-extra-info] = ""

View File

@@ -0,0 +1,114 @@
inherit relocatable
# SDK packages are built either explicitly by the user,
# or indirectly via dependency. No need to be in 'world'.
EXCLUDE_FROM_WORLD = "1"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
# we don't want libc-uclibc or libc-glibc to kick in for nativesdk recipes
LIBCOVERRIDE = ""
#
# Update PACKAGE_ARCH and PACKAGE_ARCHS
#
PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
python () {
archs = d.getVar('PACKAGE_ARCHS', True).split()
sdkarchs = []
for arch in archs:
sdkarchs.append(arch + '-nativesdk')
d.setVar('PACKAGE_ARCHS', " ".join(sdkarchs))
}
STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"
HOST_OS = "${SDK_OS}"
HOST_PREFIX = "${SDK_PREFIX}"
HOST_CC_ARCH = "${SDK_CC_ARCH}"
HOST_LD_ARCH = "${SDK_LD_ARCH}"
HOST_AS_ARCH = "${SDK_AS_ARCH}"
#HOST_SYS = "${HOST_ARCH}${TARGET_VENDOR}-${HOST_OS}"
TARGET_ARCH = "${SDK_ARCH}"
TARGET_VENDOR = "${SDK_VENDOR}"
TARGET_OS = "${SDK_OS}"
TARGET_PREFIX = "${SDK_PREFIX}"
TARGET_CC_ARCH = "${SDK_CC_ARCH}"
TARGET_LD_ARCH = "${SDK_LD_ARCH}"
TARGET_AS_ARCH = "${SDK_AS_ARCH}"
TARGET_FPU = ""
CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
CFLAGS = "${BUILDSDK_CFLAGS}"
CXXFLAGS = "${BUILDSDK_CFLAGS}"
LDFLAGS = "${BUILDSDK_LDFLAGS}"
# Change to place files in SDKPATH
base_prefix = "${SDKPATHNATIVE}"
prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
baselib = "lib"
export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${libdir}/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
python nativesdk_virtclass_handler () {
if not isinstance(e, bb.event.RecipePreFinalise):
return
pn = e.data.getVar("PN", True)
if not pn.endswith("-nativesdk"):
return
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
}
python () {
pn = d.getVar("PN", True)
if not pn.endswith("-nativesdk"):
return
def map_dependencies(varname, d, suffix = ""):
if suffix:
varname = varname + "_" + suffix
deps = d.getVar(varname, True)
if not deps:
return
deps = bb.utils.explode_deps(deps)
newdeps = []
for dep in deps:
if dep.endswith("-native") or dep.endswith("-cross"):
newdeps.append(dep)
elif dep.endswith("-gcc-intermediate") or dep.endswith("-gcc-initial") or dep.endswith("-gcc") or dep.endswith("-g++"):
newdeps.append(dep + "-crosssdk")
elif not dep.endswith("-nativesdk"):
newdeps.append(dep.replace("-nativesdk", "") + "-nativesdk")
else:
newdeps.append(dep)
d.setVar(varname, " ".join(newdeps))
map_dependencies("DEPENDS", d)
#for pkg in (d.getVar("PACKAGES", True).split() + [""]):
# map_dependencies("RDEPENDS", d, pkg)
# map_dependencies("RRECOMMENDS", d, pkg)
# map_dependencies("RSUGGESTS", d, pkg)
# map_dependencies("RPROVIDES", d, pkg)
# map_dependencies("RREPLACES", d, pkg)
provides = d.getVar("PROVIDES", True)
for prov in provides.split():
if prov.find(pn) != -1:
continue
if not prov.endswith("-nativesdk"):
provides = provides.replace(prov, prov + "-nativesdk")
d.setVar("PROVIDES", provides)
}
addhandler nativesdk_virtclass_handler
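# Example (illustrative): a "foo-nativesdk" recipe has DEPENDS entries such
# as "zlib" rewritten to "zlib-nativesdk", toolchain entries ending in
# "-gcc"/"-g++" suffixed with "-crosssdk", and existing "-native"/"-cross"
# entries passed through unchanged.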
do_populate_sysroot[stamp-extra-info] = ""
do_package[stamp-extra-info] = ""

View File

@@ -0,0 +1,174 @@
addtask lint before do_fetch
do_lint[nostamp] = "1"
python do_lint() {
def testVar(var, explain=None):
try:
s = d[var]
return s["content"]
except KeyError:
bb.error("%s is not set" % var)
if explain: bb.note(explain)
return None
##############################
# Test that DESCRIPTION exists
#
testVar("DESCRIPTION")
##############################
# Test that HOMEPAGE exists
#
s = testVar("HOMEPAGE")
if s=="unknown":
bb.error("HOMEPAGE is not set")
elif not s.startswith("http://"):
bb.error("HOMEPAGE doesn't start with http://")
##############################
# Test for valid LICENSE
#
valid_licenses = {
"GPL-2" : "GPLv2",
"GPL LGPL FDL" : True,
"GPL PSF" : True,
"GPL/QPL" : True,
"GPL" : True,
"GPLv2" : True,
"IBM" : True,
"LGPL GPL" : True,
"LGPL" : True,
"MIT" : True,
"OSL" : True,
"Perl" : True,
"Public Domain" : True,
"QPL" : "GPL/QPL",
}
s = testVar("LICENSE")
if s=="unknown":
bb.error("LICENSE is not set")
elif s.startswith("Vendor"):
pass
else:
try:
newlic = valid_licenses[s]
if newlic == False:
bb.note("LICENSE '%s' is not recommended" % s)
elif newlic != True:
bb.note("LICENSE '%s' is not recommended, better use '%s'" % (s, newsect))
except:
bb.note("LICENSE '%s' is not recommended" % s)
##############################
# Test for valid MAINTAINER
#
s = testVar("MAINTAINER")
if s=="OpenEmbedded Team <openembedded-devel@openembedded.org>":
bb.error("explicit MAINTAINER is missing, using default")
elif s and s.find("@") == -1:
bb.error("You forgot to put an e-mail address into MAINTAINER")
##############################
# Test for valid SECTION
#
# Correct section values:
#   True   - section name is valid
#   False  - section name is invalid, no suggestion
#   string - section name is invalid, better name suggested
#
valid_sections = {
# Current Section Correct section
"apps" : True,
"audio" : True,
"base" : True,
"console/games" : True,
"console/net" : "console/network",
"console/network" : True,
"console/utils" : True,
"devel" : True,
"developing" : "devel",
"devel/python" : True,
"fonts" : True,
"games" : True,
"games/libs" : True,
"gnome/base" : True,
"gnome/libs" : True,
"gpe" : True,
"gpe/libs" : True,
"gui" : False,
"libc" : "libs",
"libs" : True,
"libs/net" : True,
"multimedia" : True,
"net" : "network",
"NET" : "network",
"network" : True,
"opie/applets" : True,
"opie/applications" : True,
"opie/base" : True,
"opie/codecs" : True,
"opie/decorations" : True,
"opie/fontfactories" : True,
"opie/fonts" : True,
"opie/games" : True,
"opie/help" : True,
"opie/inputmethods" : True,
"opie/libs" : True,
"opie/multimedia" : True,
"opie/pim" : True,
"opie/setting" : "opie/settings",
"opie/settings" : True,
"opie/Shell" : False,
"opie/styles" : True,
"opie/today" : True,
"scientific" : True,
"utils" : True,
"x11" : True,
"x11/libs" : True,
"x11/wm" : True,
}
s = testVar("SECTION")
if s:
try:
newsect = valid_sections[s]
if newsect == False:
bb.note("SECTION '%s' is not recommended" % s)
elif newsect != True:
bb.note("SECTION '%s' is not recommended, better use '%s'" % (s, newsect))
except:
bb.note("SECTION '%s' is not recommended" % s)
if not s.islower():
bb.error("SECTION should only use lower case")
##############################
# Test for valid PRIORITY
#
valid_priorities = {
"standard" : True,
"required" : True,
"optional" : True,
"extra" : True,
}
s = testVar("PRIORITY")
if s:
try:
newprio = valid_priorities[s]
if newprio == False:
bb.note("PRIORITY '%s' is not recommended" % s)
elif newprio != True:
bb.note("PRIORITY '%s' is not recommended, better use '%s'" % (s, newprio))
except:
bb.note("PRIORITY '%s' is not recommended" % s)
if not s.islower():
bb.error("PRIORITY should only use lower case")
}

View File

@@ -0,0 +1,12 @@
PREMIRRORS() {
cvs://.*/.* ${SOURCE_MIRROR_URL}
svn://.*/.* ${SOURCE_MIRROR_URL}
git://.*/.* ${SOURCE_MIRROR_URL}
hg://.*/.* ${SOURCE_MIRROR_URL}
bzr://.*/.* ${SOURCE_MIRROR_URL}
svk://.*/.* ${SOURCE_MIRROR_URL}
p4://.*/.* ${SOURCE_MIRROR_URL}
osc://.*/.* ${SOURCE_MIRROR_URL}
https?://.*/.* ${SOURCE_MIRROR_URL}
ftp://.*/.* ${SOURCE_MIRROR_URL}
}
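# Example usage (illustrative; assumes this class is inherited as
# own-mirrors): in local.conf set
# SOURCE_MIRROR_URL = "file:///opt/source-mirror"
# INHERIT += "own-mirrors"
# so the fetcher tries the local mirror before the upstream URI for each of
# the schemes listed above.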

File diff suppressed because it is too large

View File

@@ -0,0 +1,431 @@
#
# Copyright 2006-2008 OpenedHand Ltd.
#
inherit package
IMAGE_PKGTYPE ?= "deb"
DPKG_ARCH ?= "${TARGET_ARCH}"
PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
python package_deb_fn () {
d.setVar('PKGFN', d.getVar('PKG'))
}
addtask package_deb_install
python do_package_deb_install () {
import commands
pkg = d.getVar('PKG', True)
pkgfn = d.getVar('PKGFN', True)
rootfs = d.getVar('IMAGE_ROOTFS', True)
debdir = d.getVar('DEPLOY_DIR_DEB', True)
apt_config = bb.data.expand('${STAGING_ETCDIR_NATIVE}/apt/apt.conf', d)
stagingbindir = d.getVar('STAGING_BINDIR_NATIVE', True)
tmpdir = d.getVar('TMPDIR', True)
if None in (pkg,pkgfn,rootfs):
raise bb.build.FuncFailed("missing variables (one or more of PKG, PKGFN, IMAGE_ROOTFS)")
try:
if not os.path.exists(rootfs):
os.makedirs(rootfs)
os.chdir(rootfs)
except OSError:
import sys
raise bb.build.FuncFailed(str(sys.exc_value))
# update packages file
(exitstatus, output) = commands.getstatusoutput('dpkg-scanpackages %s > %s/Packages' % (debdir, debdir))
if (exitstatus != 0 ):
raise bb.build.FuncFailed(output)
f = open(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"), "w")
f.close()
# NOTE: this env stuff is racy at best, we need something more capable
# than 'commands' for command execution, which includes manipulating the
# env of the fork+execve'd process
# Set up environment
apt_config_backup = os.getenv('APT_CONFIG')
os.putenv('APT_CONFIG', apt_config)
path = os.getenv('PATH')
os.putenv('PATH', '%s:%s' % (stagingbindir, os.getenv('PATH')))
# install package
commands.getstatusoutput('apt-get update')
commands.getstatusoutput('apt-get install -y %s' % pkgfn)
# revert environment
os.putenv('APT_CONFIG', apt_config_backup)
os.putenv('PATH', path)
}
#
# Update the Packages index files in ${DEPLOY_DIR_DEB}
#
package_update_index_deb () {
local debarchs=""
if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
return
fi
for arch in ${PACKAGE_ARCHS} ${SDK_PACKAGE_ARCHS}; do
if [ -e ${DEPLOY_DIR_DEB}/$arch ]; then
debarchs="$debarchs $arch"
fi
done
for arch in $debarchs; do
if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
continue;
fi
cd ${DEPLOY_DIR_DEB}/$arch
dpkg-scanpackages . | bzip2 > Packages.bz2
echo "Label: $arch" > Release
done
}
#
# install a bunch of packages using apt
# the following shell variables need to be set before calling this func:
# INSTALL_ROOTFS_DEB - install root dir
# INSTALL_BASEARCH_DEB - install base architecture
# INSTALL_ARCHS_DEB - list of available archs
# INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages whose installation is only attempted
# INSTALL_PACKAGES_LINGUAS_DEB - additional locale (linguas) packages
# INSTALL_TASK_DEB - task name
# (an illustrative caller is sketched after this function)
package_install_internal_deb () {
local target_rootfs="${INSTALL_ROOTFS_DEB}"
local dpkg_arch="${INSTALL_BASEARCH_DEB}"
local archs="${INSTALL_ARCHS_DEB}"
local package_to_install="${INSTALL_PACKAGES_NORMAL_DEB}"
local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_DEB}"
local package_linguas="${INSTALL_PACKAGES_LINGUAS_DEB}"
local task="${INSTALL_TASK_DEB}"
rm -f ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
rm -f ${STAGING_ETCDIR_NATIVE}/apt/preferences
priority=1
for arch in $archs; do
if [ ! -d ${DEPLOY_DIR_DEB}/$arch ]; then
continue;
fi
echo "deb file:${DEPLOY_DIR_DEB}/$arch/ ./" >> ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
(echo "Package: *"
echo "Pin: release l=$arch"
echo "Pin-Priority: $(expr 800 + $priority)"
echo) >> ${STAGING_ETCDIR_NATIVE}/apt/preferences
priority=$(expr $priority + 5)
done
tac ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev > ${STAGING_ETCDIR_NATIVE}/apt/sources.list
cat "${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample" \
| sed -e "s#Architecture \".*\";#Architecture \"${dpkg_arch}\";#" \
| sed -e "s:#ROOTFS#:${target_rootfs}:g" \
> "${STAGING_ETCDIR_NATIVE}/apt/apt-${task}.conf"
export APT_CONFIG="${STAGING_ETCDIR_NATIVE}/apt/apt-${task}.conf"
mkdir -p ${target_rootfs}/var/lib/dpkg/info
mkdir -p ${target_rootfs}/var/lib/dpkg/updates
> ${target_rootfs}/var/lib/dpkg/status
> ${target_rootfs}/var/lib/dpkg/available
apt-get update
# Uclibc builds don't provide this stuff..
if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
if [ ! -z "${package_linguas}" ]; then
apt-get install glibc-localedata-i18n --force-yes --allow-unauthenticated
if [ $? -ne 0 ]; then
exit 1
fi
for i in ${package_linguas}; do
apt-get install $i --force-yes --allow-unauthenticated
if [ $? -ne 0 ]; then
exit 1
fi
done
fi
fi
# normal install
for i in ${package_to_install}; do
apt-get install $i --force-yes --allow-unauthenticated
if [ $? -ne 0 ]; then
exit 1
fi
done
rm -f ${WORKDIR}/temp/log.do_${task}-attemptonly.${PID}
if [ ! -z "${package_attemptonly}" ]; then
for i in ${package_attemptonly}; do
apt-get install $i --force-yes --allow-unauthenticated >> ${WORKDIR}/temp/log.do_${task}-attemptonly.${PID} 2>&1 || true
done
fi
find ${target_rootfs} -name \*.dpkg-new | for i in `cat`; do
mv $i `echo $i | sed -e's,\.dpkg-new$,,'`
done
# Mark all packages installed
sed -i -e "s/Status: install ok unpacked/Status: install ok installed/;" ${target_rootfs}/var/lib/dpkg/status
}
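# Illustrative caller (hypothetical values): a rootfs task could export the
# variables documented above and then invoke the installer, e.g.:
# export INSTALL_ROOTFS_DEB="${IMAGE_ROOTFS}"
# export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
# export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
# export INSTALL_PACKAGES_NORMAL_DEB="${PACKAGE_INSTALL}"
# export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
# export INSTALL_PACKAGES_LINGUAS_DEB=""
# export INSTALL_TASK_DEB="rootfs"
# package_install_internal_deb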
deb_log_check() {
target="$1"
lf_path="$2"
lf_txt="`cat $lf_path`"
for keyword_die in "E:"
do
if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
then
echo "log_check: There were error messages in the logfile"
echo -e "log_check: Matched keyword: [$keyword_die]\n"
echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
echo ""
do_exit=1
fi
done
test "$do_exit" = 1 && exit 1
true
}
python do_package_deb () {
import re, copy
import textwrap
workdir = d.getVar('WORKDIR', True)
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
outdir = d.getVar('PKGWRITEDIRDEB', True)
if not outdir:
bb.error("PKGWRITEDIRDEB not defined, unable to package")
return
packages = d.getVar('PACKAGES', True)
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
return
tmpdir = d.getVar('TMPDIR', True)
if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
if not packages.split():
bb.debug(1, "No packages; nothing to do")
return
pkgdest = d.getVar('PKGDEST', True)
for pkg in packages.split():
localdata = bb.data.createCopy(d)
root = "%s/%s" % (pkgdest, pkg)
lf = bb.utils.lockfile(root + ".lock")
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
pkgname = localdata.getVar('PKG_%s' % pkg, True)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', pkg)
bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
pkgoutdir = os.path.join(outdir, localdata.getVar('PACKAGE_ARCH', True))
bb.mkdirhier(pkgoutdir)
os.chdir(root)
from glob import glob
g = glob('*')
try:
del g[g.index('DEBIAN')]
del g[g.index('./DEBIAN')]
except ValueError:
pass
if not g and localdata.getVar('ALLOW_EMPTY') != "1":
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', True), localdata.getVar('PKGR', True)))
bb.utils.unlockfile(lf)
continue
controldir = os.path.join(root, 'DEBIAN')
bb.mkdirhier(controldir)
os.chmod(controldir, 0755)
try:
ctrlfile = file(os.path.join(controldir, 'control'), 'wb')
# import codecs
# ctrlfile = codecs.open("someFile", "w", "utf-8")
except OSError:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("unable to open control file for writing.")
fields = []
pe = d.getVar('PKGE', True)
if pe and int(pe) > 0:
fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
else:
fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
fields.append(["Description: %s\n", ['DESCRIPTION']])
fields.append(["Section: %s\n", ['SECTION']])
fields.append(["Priority: %s\n", ['PRIORITY']])
fields.append(["Maintainer: %s\n", ['MAINTAINER']])
fields.append(["Architecture: %s\n", ['DPKG_ARCH']])
fields.append(["OE: %s\n", ['PN']])
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
# Package, Version, Maintainer, Description - mandatory
# Section, Priority, Essential, Architecture, Source, Depends, Pre-Depends, Recommends, Suggests, Conflicts, Replaces, Provides - Optional
def pullData(l, d):
l2 = []
for i in l:
data = d.getVar(i, True)
if data is None:
raise KeyError(i)
if i == 'DPKG_ARCH' and d.getVar('PACKAGE_ARCH', True) == 'all':
data = 'all'
l2.append(data)
return l2
ctrlfile.write("Package: %s\n" % pkgname)
# check for required fields
try:
for (c, fs) in fields:
for f in fs:
if localdata.getVar(f) is None:
raise KeyError(f)
# Special behavior for description...
if 'DESCRIPTION' in fs:
summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
description = localdata.getVar('DESCRIPTION', True) or "."
description = textwrap.dedent(description).strip()
ctrlfile.write('Description: %s\n' % unicode(summary))
ctrlfile.write('%s\n' % unicode(textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' ')))
else:
ctrlfile.write(unicode(c % tuple(pullData(fs, localdata))))
except KeyError:
import sys
(type, value, traceback) = sys.exc_info()
bb.utils.unlockfile(lf)
ctrlfile.close()
raise bb.build.FuncFailed("Missing field for deb generation: %s" % value)
# more fields
bb.build.exec_func("mapping_rename_hook", localdata)
rdepends = bb.utils.explode_dep_versions(localdata.getVar("RDEPENDS", True) or "")
for dep in rdepends:
if '*' in dep:
del rdepends[dep]
rrecommends = bb.utils.explode_dep_versions(localdata.getVar("RRECOMMENDS", True) or "")
for dep in rrecommends:
if '*' in dep:
del rrecommends[dep]
rsuggests = bb.utils.explode_dep_versions(localdata.getVar("RSUGGESTS", True) or "")
rprovides = bb.utils.explode_dep_versions(localdata.getVar("RPROVIDES", True) or "")
rreplaces = bb.utils.explode_dep_versions(localdata.getVar("RREPLACES", True) or "")
rconflicts = bb.utils.explode_dep_versions(localdata.getVar("RCONFLICTS", True) or "")
if rdepends:
ctrlfile.write("Depends: %s\n" % unicode(bb.utils.join_deps(rdepends)))
if rsuggests:
ctrlfile.write("Suggests: %s\n" % unicode(bb.utils.join_deps(rsuggests)))
if rrecommends:
ctrlfile.write("Recommends: %s\n" % unicode(bb.utils.join_deps(rrecommends)))
if rprovides:
ctrlfile.write("Provides: %s\n" % unicode(bb.utils.join_deps(rprovides)))
if rreplaces:
ctrlfile.write("Replaces: %s\n" % unicode(bb.utils.join_deps(rreplaces)))
if rconflicts:
ctrlfile.write("Conflicts: %s\n" % unicode(bb.utils.join_deps(rconflicts)))
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
scriptvar = localdata.getVar('pkg_%s' % script, True)
if not scriptvar:
continue
try:
scriptfile = file(os.path.join(controldir, script), 'w')
except OSError:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
scriptfile.write("#!/bin/sh\n")
scriptfile.write(scriptvar)
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0755)
conffiles_str = localdata.getVar("CONFFILES", True)
if conffiles_str:
try:
conffiles = file(os.path.join(controldir, 'conffiles'), 'w')
except OSError:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("unable to open conffiles for writing.")
for f in conffiles_str.split():
conffiles.write('%s\n' % f)
conffiles.close()
os.chdir(basedir)
ret = os.system("PATH=\"%s\" dpkg-deb -b %s %s" % (localdata.getVar("PATH", True), root, pkgoutdir))
if ret != 0:
bb.utils.prunedir(controldir)
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("dpkg-deb execution failed")
bb.utils.prunedir(controldir)
bb.utils.unlockfile(lf)
}
SSTATETASKS += "do_package_write_deb"
do_package_write_deb[sstate-name] = "deploy-deb"
do_package_write_deb[sstate-inputdirs] = "${PKGWRITEDIRDEB}"
do_package_write_deb[sstate-outputdirs] = "${DEPLOY_DIR_DEB}"
python do_package_write_deb_setscene () {
sstate_setscene(d)
}
addtask do_package_write_deb_setscene
python () {
if d.getVar('PACKAGES', True) != '':
deps = (d.getVarFlag('do_package_write_deb', 'depends') or "").split()
deps.append('dpkg-native:do_populate_sysroot')
deps.append('virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package_write_deb', 'depends', " ".join(deps))
d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
d.setVarFlag('do_package_write_deb_setscene', 'fakeroot', "1")
# Map TARGET_ARCH to Debian's ideas about architectures
if d.getVar('DPKG_ARCH', True) in ["x86", "i486", "i586", "i686", "pentium"]:
d.setVar('DPKG_ARCH', 'i386')
}
python do_package_write_deb () {
bb.build.exec_func("read_subpackage_metadata", d)
bb.build.exec_func("do_package_deb", d)
}
do_package_write_deb[dirs] = "${PKGWRITEDIRDEB}"
addtask package_write_deb before do_package_write after do_package
PACKAGEINDEXES += "package_update_index_deb;"
PACKAGEINDEXDEPS += "dpkg-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "apt-native:do_populate_sysroot"

View File

@@ -0,0 +1,462 @@
inherit package
IMAGE_PKGTYPE ?= "ipk"
IPKGCONF_TARGET = "${WORKDIR}/opkg.conf"
IPKGCONF_SDK = "${WORKDIR}/opkg-sdk.conf"
PKGWRITEDIRIPK = "${WORKDIR}/deploy-ipks"
# Program to be used to build opkg packages
OPKGBUILDCMD ??= "opkg-build"
python package_ipk_fn () {
d.setVar('PKGFN', d.getVar('PKG'))
}
python package_ipk_install () {
pkg = d.getVar('PKG', 1)
pkgfn = d.getVar('PKGFN', 1)
rootfs = d.getVar('IMAGE_ROOTFS', 1)
ipkdir = d.getVar('DEPLOY_DIR_IPK', 1)
stagingdir = d.getVar('STAGING_DIR', 1)
tmpdir = d.getVar('TMPDIR', 1)
if None in (pkg,pkgfn,rootfs):
raise bb.build.FuncFailed("missing variables (one or more of PKG, PKGFN, IMAGEROOTFS)")
try:
bb.mkdirhier(rootfs)
os.chdir(rootfs)
except OSError:
import sys
(type, value, traceback) = sys.exc_info()
print value
raise bb.build.FuncFailed
# Generate ipk.conf if it or the stamp doesn't exist
conffile = os.path.join(stagingdir,"ipkg.conf")
if not os.access(conffile, os.R_OK):
ipkg_archs = d.getVar('PACKAGE_ARCHS')
if ipkg_archs is None:
bb.error("PACKAGE_ARCHS missing")
raise bb.build.FuncFailed("PACKAGE_ARCHS missing")
ipkg_archs = ipkg_archs.split()
arch_priority = 1
f = open(conffile,"w")
for arch in ipkg_archs:
f.write("arch %s %s\n" % ( arch, arch_priority ))
arch_priority += 1
f.write("src local file:%s" % ipkdir)
f.close()
if not os.access(os.path.join(ipkdir,"Packages"), os.R_OK) or not os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"),os.R_OK):
ret = os.system('opkg-make-index -p %s %s ' % (os.path.join(ipkdir, "Packages"), ipkdir))
if (ret != 0 ):
raise bb.build.FuncFailed
f = open(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"),"w")
f.close()
ret = os.system('opkg-cl -o %s -f %s update' % (rootfs, conffile))
ret = os.system('opkg-cl -o %s -f %s install %s' % (rootfs, conffile, pkgfn))
if (ret != 0 ):
raise bb.build.FuncFailed
}
package_tryout_install_multilib_ipk() {
#try install multilib
multilib_tryout_dirs=""
for item in ${MULTILIB_VARIANTS}; do
local target_rootfs="${MULTILIB_TEMP_ROOTFS}/${item}"
local ipkg_args="-f ${INSTALL_CONF_IPK} -o ${target_rootfs} --force_overwrite"
local selected_pkg=""
local pkgname_prefix="${item}-"
local pkgname_len=${#pkgname_prefix}
for pkg in ${INSTALL_PACKAGES_MULTILIB_IPK}; do
local pkgname=$(echo $pkg | awk -v var=$pkgname_len '{ pkgname=substr($1, 1, var - 1); print pkgname; }' )
if [ ${pkgname} = ${pkgname_prefix} ]; then
selected_pkg="${selected_pkg} ${pkg}"
fi
done
if [ ! -z "${selected_pkg}" ]; then
rm -f ${target_rootfs}
mkdir -p ${target_rootfs}/${opkglibdir}
opkg-cl ${ipkg_args} update
opkg-cl ${ipkg_args} install ${selected_pkg}
multilib_tryout_dirs="${multilib_tryout_dirs} ${target_rootfs}"
fi
done
}
split_multilib_packages() {
INSTALL_PACKAGES_NORMAL_IPK=""
INSTALL_PACKAGES_MULTILIB_IPK=""
for pkg in ${INSTALL_PACKAGES_IPK}; do
is_multilib=0
for item in ${MULTILIB_VARIANTS}; do
local pkgname_prefix="${item}-"
local pkgname_len=${#pkgname_prefix}
local pkgname=$(echo $pkg | awk -v var=$pkgname_len '{ pkgname=substr($1, 1, var - 1); print pkgname; }' )
if [ ${pkgname} = ${pkgname_prefix} ]; then
is_multilib=1
break
fi
done
if [ ${is_multilib} = 0 ]; then
INSTALL_PACKAGES_NORMAL_IPK="${INSTALL_PACKAGES_NORMAL_IPK} ${pkg}"
else
INSTALL_PACKAGES_MULTILIB_IPK="${INSTALL_PACKAGES_MULTILIB_IPK} ${pkg}"
fi
done
}
#
# install a bunch of packages using opkg
# the following shell variables need to be set before calling this func:
# INSTALL_ROOTFS_IPK - install root dir
# INSTALL_CONF_IPK - configuration file
# INSTALL_PACKAGES_IPK - packages to be installed
# INSTALL_PACKAGES_ATTEMPTONLY_IPK - packages whose installation is only attempted
# INSTALL_PACKAGES_LINGUAS_IPK - additional locale (linguas) packages
# INSTALL_TASK_IPK - task name
# (an illustrative caller is sketched after this function)
package_install_internal_ipk() {
local target_rootfs="${INSTALL_ROOTFS_IPK}"
local conffile="${INSTALL_CONF_IPK}"
local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_IPK}"
local package_linguas="${INSTALL_PACKAGES_LINGUAS_IPK}"
local task="${INSTALL_TASK_IPK}"
split_multilib_packages
local package_to_install="${INSTALL_PACKAGES_NORMAL_IPK}"
local package_multilib="${INSTALL_PACKAGES_MULTILIB_IPK}"
mkdir -p ${target_rootfs}${localstatedir}/lib/opkg/
local ipkg_args="-f ${conffile} -o ${target_rootfs} --force-overwrite --force_postinstall"
opkg-cl ${ipkg_args} update
# Uclibc builds don't provide this stuff...
if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
if [ ! -z "${package_linguas}" ]; then
for i in ${package_linguas}; do
opkg-cl ${ipkg_args} install $i
done
fi
fi
if [ ! -z "${package_to_install}" ]; then
opkg-cl ${ipkg_args} install ${package_to_install}
fi
if [ ! -z "${package_attemptonly}" ]; then
opkg-cl ${ipkg_args} install ${package_attemptonly} > "${WORKDIR}/temp/log.do_${task}_attemptonly.${PID}" || true
fi
package_tryout_install_multilib_ipk
if [ ! -z "${MULTILIB_CHECK_FILE}" ]; then
#sanity check
multilib_sanity_check ${target_rootfs} ${multilib_tryout_dirs} || exit 1
fi
if [ ! -z "${package_multilib}" ]; then
opkg-cl ${ipkg_args} install ${package_multilib}
fi
}
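# Illustrative caller (hypothetical values): analogous to the deb variant, a
# rootfs task could set, e.g.:
# export INSTALL_ROOTFS_IPK="${IMAGE_ROOTFS}"
# export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
# export INSTALL_PACKAGES_IPK="${PACKAGE_INSTALL}"
# export INSTALL_PACKAGES_ATTEMPTONLY_IPK=""
# export INSTALL_PACKAGES_LINGUAS_IPK=""
# export INSTALL_TASK_IPK="rootfs"
# package_install_internal_ipk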
ipk_log_check() {
target="$1"
lf_path="$2"
lf_txt="`cat $lf_path`"
for keyword_die in "exit 1" "Collected errors" ERR Fail
do
if (echo "$lf_txt" | grep -v log_check | grep "$keyword_die") >/dev/null 2>&1
then
echo "log_check: There were error messages in the logfile"
echo -e "log_check: Matched keyword: [$keyword_die]\n"
echo "$lf_txt" | grep -v log_check | grep -C 5 -i "$keyword_die"
echo ""
do_exit=1
fi
done
test "$do_exit" = 1 && exit 1
true
}
#
# Update the Packages index files in ${DEPLOY_DIR_IPK}
#
package_update_index_ipk () {
set -x
ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS} ${SDK_PACKAGE_ARCHS}"
if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
return
fi
packagedirs="${DEPLOY_DIR_IPK}"
for arch in $ipkgarchs; do
packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch"
done
multilib_archs="${MULTILIB_ARCHS}"
for arch in $multilib_archs; do
packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch"
done
for pkgdir in $packagedirs; do
if [ -e $pkgdir/ ]; then
touch $pkgdir/Packages
flock $pkgdir/Packages.flock -c "opkg-make-index -r $pkgdir/Packages -p $pkgdir/Packages -l $pkgdir/Packages.filelist -m $pkgdir/"
fi
done
}
#
# Generate an ipkg conf file ${IPKGCONF_TARGET} suitable for use against
# the target system and an ipkg conf file ${IPKGCONF_SDK} suitable for
# use against the host system in sdk builds
#
package_generate_ipkg_conf () {
package_generate_archlist
echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_SDK}
ipkgarchs="${SDK_PACKAGE_ARCHS}"
for arch in $ipkgarchs; do
if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_SDK}
fi
done
echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_TARGET}
ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS}"
for arch in $ipkgarchs; do
if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_TARGET}
fi
done
}
package_generate_archlist () {
ipkgarchs="${SDK_PACKAGE_ARCHS}"
priority=1
for arch in $ipkgarchs; do
echo "arch $arch $priority" >> ${IPKGCONF_SDK}
priority=$(expr $priority + 5)
done
ipkgarchs="${ALL_MULTILIB_PACKAGE_ARCHS}"
priority=1
for arch in $ipkgarchs; do
echo "arch $arch $priority" >> ${IPKGCONF_TARGET}
priority=$(expr $priority + 5)
done
}
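# Editor's note: assuming ALL_MULTILIB_PACKAGE_ARCHS = "all armv5te qemuarm",
# the second loop above would emit the following into ${IPKGCONF_TARGET},
# spacing priorities 5 apart so the more specific architectures are preferred:
#   arch all 1
#   arch armv5te 6
#   arch qemuarm 11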
python do_package_ipk () {
import re, copy
import textwrap
workdir = d.getVar('WORKDIR', True)
outdir = d.getVar('PKGWRITEDIRIPK', True)
tmpdir = d.getVar('TMPDIR', True)
pkgdest = d.getVar('PKGDEST', True)
if not workdir or not outdir or not tmpdir:
bb.error("Variables incorrectly set, unable to package")
return
packages = d.getVar('PACKAGES', True)
if not packages or packages == '':
bb.debug(1, "No packages; nothing to do")
return
# We're about to add new packages so the index needs to be checked
# so remove the appropriate stamp file.
if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
for pkg in packages.split():
localdata = bb.data.createCopy(d)
root = "%s/%s" % (pkgdest, pkg)
lf = bb.utils.lockfile(root + ".lock")
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
pkgname = localdata.getVar('PKG_%s' % pkg, 1)
if not pkgname:
pkgname = pkg
localdata.setVar('PKG', pkgname)
localdata.setVar('OVERRIDES', pkg)
bb.data.update_data(localdata)
basedir = os.path.join(os.path.dirname(root))
arch = localdata.getVar('PACKAGE_ARCH', 1)
pkgoutdir = "%s/%s" % (outdir, arch)
bb.mkdirhier(pkgoutdir)
os.chdir(root)
from glob import glob
g = glob('*')
try:
del g[g.index('CONTROL')]
del g[g.index('./CONTROL')]
except ValueError:
pass
if not g and localdata.getVar('ALLOW_EMPTY') != "1":
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', 1), localdata.getVar('PKGR', 1)))
bb.utils.unlockfile(lf)
continue
controldir = os.path.join(root, 'CONTROL')
bb.mkdirhier(controldir)
try:
ctrlfile = file(os.path.join(controldir, 'control'), 'w')
except OSError:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("unable to open control file for writing.")
fields = []
pe = d.getVar('PKGE', 1)
if pe and int(pe) > 0:
fields.append(["Version: %s:%s-%s\n", ['PKGE', 'PKGV', 'PKGR']])
else:
fields.append(["Version: %s-%s\n", ['PKGV', 'PKGR']])
fields.append(["Description: %s\n", ['DESCRIPTION']])
fields.append(["Section: %s\n", ['SECTION']])
fields.append(["Priority: %s\n", ['PRIORITY']])
fields.append(["Maintainer: %s\n", ['MAINTAINER']])
fields.append(["License: %s\n", ['LICENSE']])
fields.append(["Architecture: %s\n", ['PACKAGE_ARCH']])
fields.append(["OE: %s\n", ['PN']])
fields.append(["Homepage: %s\n", ['HOMEPAGE']])
def pullData(l, d):
l2 = []
for i in l:
l2.append(d.getVar(i, 1))
return l2
ctrlfile.write("Package: %s\n" % pkgname)
# check for required fields
try:
for (c, fs) in fields:
for f in fs:
if localdata.getVar(f) is None:
raise KeyError(f)
# Special behavior for description...
if 'DESCRIPTION' in fs:
summary = localdata.getVar('SUMMARY', True) or localdata.getVar('DESCRIPTION', True) or "."
description = localdata.getVar('DESCRIPTION', True) or "."
description = textwrap.dedent(description).strip()
ctrlfile.write('Description: %s\n' % summary)
ctrlfile.write('%s\n' % textwrap.fill(description, width=74, initial_indent=' ', subsequent_indent=' '))
else:
ctrlfile.write(c % tuple(pullData(fs, localdata)))
except KeyError:
import sys
(type, value, traceback) = sys.exc_info()
ctrlfile.close()
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("Missing field for ipk generation: %s" % value)
# more fields
bb.build.exec_func("mapping_rename_hook", localdata)
rdepends = bb.utils.explode_dep_versions(localdata.getVar("RDEPENDS", 1) or "")
rrecommends = bb.utils.explode_dep_versions(localdata.getVar("RRECOMMENDS", 1) or "")
rsuggests = bb.utils.explode_dep_versions(localdata.getVar("RSUGGESTS", 1) or "")
rprovides = bb.utils.explode_dep_versions(localdata.getVar("RPROVIDES", 1) or "")
rreplaces = bb.utils.explode_dep_versions(localdata.getVar("RREPLACES", 1) or "")
rconflicts = bb.utils.explode_dep_versions(localdata.getVar("RCONFLICTS", 1) or "")
if rdepends:
ctrlfile.write("Depends: %s\n" % bb.utils.join_deps(rdepends))
if rsuggests:
ctrlfile.write("Suggests: %s\n" % bb.utils.join_deps(rsuggests))
if rrecommends:
ctrlfile.write("Recommends: %s\n" % bb.utils.join_deps(rrecommends))
if rprovides:
ctrlfile.write("Provides: %s\n" % bb.utils.join_deps(rprovides))
if rreplaces:
ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
if rconflicts:
ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
src_uri = localdata.getVar("SRC_URI", 1)
if src_uri:
src_uri = re.sub("\s+", " ", src_uri)
ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
ctrlfile.close()
for script in ["preinst", "postinst", "prerm", "postrm"]:
scriptvar = localdata.getVar('pkg_%s' % script, 1)
if not scriptvar:
continue
try:
scriptfile = file(os.path.join(controldir, script), 'w')
except OSError:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("unable to open %s script file for writing." % script)
scriptfile.write(scriptvar)
scriptfile.close()
os.chmod(os.path.join(controldir, script), 0755)
conffiles_str = localdata.getVar("CONFFILES", 1)
if conffiles_str:
try:
conffiles = file(os.path.join(controldir, 'conffiles'), 'w')
except OSError:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("unable to open conffiles for writing.")
for f in conffiles_str.split():
conffiles.write('%s\n' % f)
conffiles.close()
os.chdir(basedir)
ret = os.system("PATH=\"%s\" %s %s %s" % (localdata.getVar("PATH", 1),
d.getVar("OPKGBUILDCMD",1), pkg, pkgoutdir))
if ret != 0:
bb.utils.unlockfile(lf)
raise bb.build.FuncFailed("opkg-build execution failed")
bb.utils.prunedir(controldir)
bb.utils.unlockfile(lf)
}
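# Editor's note: a sketch of the CONTROL/control file the loop above writes
# for a hypothetical package (all field values illustrative only):
#   Package: helloworld
#   Version: 1.0-r0
#   Description: a short summary
#    the longer DESCRIPTION, re-wrapped to 74 columns with a one-space indent
#   Section: base
#   Priority: optional
#   Maintainer: Example Maintainer <maint@example.com>
#   License: MIT
#   Architecture: armv5te
#   OE: helloworld
#   Homepage: http://example.com/helloworld
#   Depends: libc6 (>= 2.13)
#   Source: http://example.com/helloworld-1.0.tar.gz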
SSTATETASKS += "do_package_write_ipk"
do_package_write_ipk[sstate-name] = "deploy-ipk"
do_package_write_ipk[sstate-inputdirs] = "${PKGWRITEDIRIPK}"
do_package_write_ipk[sstate-outputdirs] = "${DEPLOY_DIR_IPK}"
python do_package_write_ipk_setscene () {
sstate_setscene(d)
}
addtask do_package_write_ipk_setscene
python () {
if d.getVar('PACKAGES', True) != '':
deps = (d.getVarFlag('do_package_write_ipk', 'depends') or "").split()
deps.append('opkg-utils-native:do_populate_sysroot')
deps.append('virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package_write_ipk', 'depends', " ".join(deps))
d.setVarFlag('do_package_write_ipk', 'fakeroot', "1")
d.setVarFlag('do_package_write_ipk_setscene', 'fakeroot', "1")
}
python do_package_write_ipk () {
bb.build.exec_func("read_subpackage_metadata", d)
bb.build.exec_func("do_package_ipk", d)
}
do_package_write_ipk[dirs] = "${PKGWRITEDIRIPK}"
addtask package_write_ipk before do_package_write after do_package
PACKAGEINDEXES += "package_update_index_ipk;"
PACKAGEINDEXDEPS += "opkg-utils-native:do_populate_sysroot"
PACKAGEINDEXDEPS += "opkg-native:do_populate_sysroot"

File diff suppressed because it is too large

View File

@@ -0,0 +1,108 @@
inherit package
IMAGE_PKGTYPE ?= "tar"
python package_tar_fn () {
fn = os.path.join(d.getVar('DEPLOY_DIR_TAR'), "%s-%s-%s.tar.gz" % (d.getVar('PKG'), d.getVar('PKGV'), d.getVar('PKGR')))
fn = bb.data.expand(fn, d)
d.setVar('PKGFN', fn)
}
python package_tar_install () {
pkg = d.getVar('PKG', 1)
pkgfn = d.getVar('PKGFN', 1)
rootfs = d.getVar('IMAGE_ROOTFS', 1)
if None in (pkg,pkgfn,rootfs):
bb.error("missing variables (one or more of PKG, PKGFN, IMAGEROOTFS)")
raise bb.build.FuncFailed
try:
bb.mkdirhier(rootfs)
os.chdir(rootfs)
except OSError:
import sys
(type, value, traceback) = sys.exc_info()
print value
raise bb.build.FuncFailed
if not os.access(pkgfn, os.R_OK):
bb.debug(1, "%s does not exist, skipping" % pkgfn)
raise bb.build.FuncFailed
ret = os.system('zcat %s | tar -xf -' % pkgfn)
if ret != 0:
raise bb.build.FuncFailed
}
python do_package_tar () {
workdir = d.getVar('WORKDIR', 1)
if not workdir:
bb.error("WORKDIR not defined, unable to package")
return
outdir = d.getVar('DEPLOY_DIR_TAR', 1)
if not outdir:
bb.error("DEPLOY_DIR_TAR not defined, unable to package")
return
bb.mkdirhier(outdir)
dvar = d.getVar('D', 1)
if not dvar:
bb.error("D not defined, unable to package")
return
bb.mkdirhier(dvar)
packages = d.getVar('PACKAGES', 1)
if not packages:
bb.debug(1, "PACKAGES not defined, nothing to package")
return
for pkg in packages.split():
localdata = bb.data.createCopy(d)
root = "%s/install/%s" % (workdir, pkg)
localdata.setVar('ROOT', '')
localdata.setVar('ROOT_%s' % pkg, root)
localdata.setVar('PKG', pkg)
overrides = localdata.getVar('OVERRIDES')
if not overrides:
raise bb.build.FuncFailed('OVERRIDES not defined')
overrides = bb.data.expand(overrides, localdata)
localdata.setVar('OVERRIDES', '%s:%s' % (overrides, pkg))
bb.data.update_data(localdata)
root = localdata.getVar('ROOT')
bb.mkdirhier(root)
basedir = os.path.dirname(root)
pkgoutdir = outdir
bb.mkdirhier(pkgoutdir)
bb.build.exec_func('package_tar_fn', localdata)
tarfn = localdata.getVar('PKGFN', 1)
os.chdir(root)
from glob import glob
if not glob('*'):
bb.note("Not creating empty archive for %s-%s-%s" % (pkg, localdata.getVar('PKGV', 1), localdata.getVar('PKGR', 1)))
continue
ret = os.system("tar -czf %s %s" % (tarfn, '.'))
if ret != 0:
bb.error("Creation of tar %s failed." % tarfn)
}
python () {
if d.getVar('PACKAGES', True) != '':
deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
deps.append('tar-native:do_populate_sysroot')
deps.append('virtual/fakeroot-native:do_populate_sysroot')
d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
d.setVarFlag('do_package_write_tar', 'fakeroot', "1")
}
python do_package_write_tar () {
bb.build.exec_func("read_subpackage_metadata", d)
bb.build.exec_func("do_package_tar", d)
}
do_package_write_tar[dirs] = "${D}"
addtask package_write_tar before do_build after do_package

View File

@@ -0,0 +1,13 @@
python read_subpackage_metadata () {
import oe.packagedata
data = oe.packagedata.read_pkgdata(d.getVar('PN', 1), d)
for key in data.keys():
d.setVar(key, data[key])
for pkg in d.getVar('PACKAGES', 1).split():
sdata = oe.packagedata.read_subpkgdata(pkg, d)
for key in sdata.keys():
d.setVar(key, sdata[key])
}

View File

@@ -0,0 +1,169 @@
# Copyright (C) 2006 OpenedHand LTD
# Point to an empty file so any user's custom settings don't break things
QUILTRCFILE ?= "${STAGING_BINDIR_NATIVE}/quiltrc"
PATCHDEPENDENCY = "${PATCHTOOL}-native:do_populate_sysroot"
inherit terminal
def src_patches(d):
workdir = d.getVar('WORKDIR', True)
fetch = bb.fetch2.Fetch([], d)
patches = []
for url in fetch.urls:
local = patch_path(url, fetch, workdir)
if not local:
continue
urldata = fetch.ud[url]
parm = urldata.parm
patchname = parm.get('pname') or os.path.basename(local)
apply, reason = should_apply(parm, d)
if not apply:
if reason:
bb.note("Patch %s %s" % (patchname, reason))
continue
patchparm = {'patchname': patchname}
if "striplevel" in parm:
striplevel = parm["striplevel"]
elif "pnum" in parm:
#bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
striplevel = parm["pnum"]
else:
striplevel = '1'
patchparm['striplevel'] = striplevel
patchdir = parm.get('patchdir')
if patchdir:
patchparm['patchdir'] = patchdir
localurl = bb.encodeurl(('file', '', local, '', '', patchparm))
patches.append(localurl)
return patches
def patch_path(url, fetch, workdir):
"""Return the local path of a patch, or None if this isn't a patch"""
local = fetch.localpath(url)
base, ext = os.path.splitext(os.path.basename(local))
if ext in ('.gz', '.bz2', '.Z'):
local = os.path.join(workdir, base)
ext = os.path.splitext(base)[1]
urldata = fetch.ud[url]
if "apply" in urldata.parm:
apply = oe.types.boolean(urldata.parm["apply"])
if not apply:
return
elif ext not in (".diff", ".patch"):
return
return local
def should_apply(parm, d):
"""Determine if we should apply the given patch"""
if "mindate" in parm or "maxdate" in parm:
pn = d.getVar('PN', True)
srcdate = d.getVar('SRCDATE_%s' % pn, True)
if not srcdate:
srcdate = d.getVar('SRCDATE', True)
if srcdate == "now":
srcdate = d.getVar('DATE', True)
if "maxdate" in parm and parm["maxdate"] < srcdate:
return False, 'is outdated'
if "mindate" in parm and parm["mindate"] > srcdate:
return False, 'is predated'
if "minrev" in parm:
srcrev = d.getVar('SRCREV', True)
if srcrev and srcrev < parm["minrev"]:
return False, 'applies to later revisions'
if "maxrev" in parm:
srcrev = d.getVar('SRCREV', True)
if srcrev and srcrev > parm["maxrev"]:
return False, 'applies to earlier revisions'
if "rev" in parm:
srcrev = d.getVar('SRCREV', True)
if srcrev and parm["rev"] not in srcrev:
return False, "doesn't apply to revision"
if "notrev" in parm:
srcrev = d.getVar('SRCREV', True)
if srcrev and parm["notrev"] in srcrev:
return False, "doesn't apply to revision"
return True, None
should_apply[vardepsexclude] = "DATE SRCDATE"
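# Editor's note: hypothetical SRC_URI entries showing the url parameters the
# functions above react to (striplevel/pnum, apply, patchdir, min/maxdate):
#   SRC_URI = "file://fix-build.patch;striplevel=0 \
#              file://optional.diff;apply=no \
#              file://old-api.patch;maxdate=20110101 \
#              file://subdir.patch;patchdir=src"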
python patch_do_patch() {
import oe.patch
patchsetmap = {
"patch": oe.patch.PatchTree,
"quilt": oe.patch.QuiltTree,
"git": oe.patch.GitApplyTree,
}
cls = patchsetmap[d.getVar('PATCHTOOL', True) or 'quilt']
resolvermap = {
"noop": oe.patch.NOOPResolver,
"user": oe.patch.UserResolver,
}
rcls = resolvermap[d.getVar('PATCHRESOLVE', True) or 'user']
classes = {}
s = d.getVar('S', True)
path = os.getenv('PATH')
os.putenv('PATH', d.getVar('PATH', True))
for patch in src_patches(d):
_, _, local, _, _, parm = bb.decodeurl(patch)
if "patchdir" in parm:
patchdir = parm["patchdir"]
if not os.path.isabs(patchdir):
patchdir = os.path.join(s, patchdir)
else:
patchdir = s
if not patchdir in classes:
patchset = cls(patchdir, d)
resolver = rcls(patchset, oe_terminal)
classes[patchdir] = (patchset, resolver)
patchset.Clean()
else:
patchset, resolver = classes[patchdir]
bb.note("Applying patch '%s' (%s)" % (parm['patchname'], oe.path.format_display(local, d)))
try:
patchset.Import({"file":local, "strippath": parm['striplevel']}, True)
except Exception as exc:
bb.fatal(str(exc))
try:
resolver.Resolve()
except bb.BBHandledException as e:
bb.fatal(str(e))
}
patch_do_patch[vardepsexclude] = "PATCHRESOLVE"
addtask patch after do_unpack
do_patch[dirs] = "${WORKDIR}"
do_patch[depends] = "${PATCHDEPENDENCY}"
EXPORT_FUNCTIONS do_patch

View File

@@ -0,0 +1,3 @@
PATH_prepend = "${STAGING_BINDIR_NATIVE}/perl-native:"
DEPENDS += "perl-native"
OECMAKE_PERLNATIVE_DIR = "${STAGING_BINDIR_NATIVE}/perl-native"

View File

@@ -0,0 +1,29 @@
PKG_DISTRIBUTECOMMAND[func] = "1"
python do_distribute_packages () {
cmd = d.getVar('PKG_DISTRIBUTECOMMAND', 1)
if not cmd:
raise bb.build.FuncFailed("Unable to distribute packages, PKG_DISTRIBUTECOMMAND not defined")
bb.build.exec_func('PKG_DISTRIBUTECOMMAND', d)
}
addtask distribute_packages before do_build after do_fetch
PKG_DIST_LOCAL ?= "symlink"
PKG_DISTRIBUTEDIR ?= "${DEPLOY_DIR}/packages"
PKG_DISTRIBUTECOMMAND () {
p=`dirname ${FILE}`
d=`basename $p`
mkdir -p ${PKG_DISTRIBUTEDIR}
case "${PKG_DIST_LOCAL}" in
copy)
# use this weird tar command to copy because we want to
# exclude the BitKeeper directories
test -e ${PKG_DISTRIBUTEDIR}/${d} || mkdir ${PKG_DISTRIBUTEDIR}/${d};
(cd ${p}; tar -c --exclude SCCS -f - . ) | tar -C ${PKG_DISTRIBUTEDIR}/${d} -xpf -
;;
symlink)
ln -sf $p ${PKG_DISTRIBUTEDIR}/
;;
esac
}

View File

@@ -0,0 +1,22 @@
python do_pkg_write_metainfo () {
deploydir = d.getVar('DEPLOY_DIR', 1)
if not deploydir:
bb.error("DEPLOY_DIR not defined, unable to write package info")
return
try:
infofile = file(os.path.join(deploydir, 'package-metainfo'), 'a')
except OSError:
raise bb.build.FuncFailed("unable to open package-info file for writing.")
name = d.getVar('PN', 1)
version = d.getVar('PV', 1)
desc = d.getVar('DESCRIPTION', 1)
page = d.getVar('HOMEPAGE', 1)
lic = d.getVar('LICENSE', 1)
infofile.write("|| "+ name +" || "+ version + " || "+ desc +" || "+ page +" || "+ lic + " ||\n" )
infofile.close()
}
addtask pkg_write_metainfo after do_package before do_build

View File

@@ -0,0 +1 @@
DEPENDS_prepend = "pkgconfig-native "

View File

@@ -0,0 +1,89 @@
inherit meta toolchain-scripts
inherit populate_sdk_${IMAGE_PKGTYPE}
SDK_DIR = "${WORKDIR}/sdk"
SDK_OUTPUT = "${SDK_DIR}/image"
SDK_DEPLOY = "${TMPDIR}/deploy/sdk"
SDKTARGETSYSROOT = "${SDKPATH}/sysroots/${MULTIMACH_TARGET_SYS}"
TOOLCHAIN_HOST_TASK ?= "task-sdk-host-nativesdk task-cross-canadian-${TRANSLATED_TARGET_ARCH}"
TOOLCHAIN_TARGET_TASK ?= "task-core-standalone-sdk-target task-core-standalone-sdk-target-dbg"
TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${DISTRO_VERSION}"
RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
DEPENDS = "virtual/fakeroot-native sed-native"
PID = "${@os.getpid()}"
EXCLUDE_FROM_WORLD = "1"
python () {
# If we don't do this we try and run the mapping hooks while parsing which is slow
# bitbake should really provide something to let us know this...
if bb.data.getVar('BB_WORKERCONTEXT', d, True) is not None:
runtime_mapping_rename("TOOLCHAIN_TARGET_TASK", d)
}
fakeroot do_populate_sdk() {
rm -rf ${SDK_OUTPUT}
mkdir -p ${SDK_OUTPUT}
# populate_sdk_<image> is required to construct two images:
# SDK_ARCH-nativesdk - contains the cross compiler and associated tooling
# target - contains a target rootfs configured for the SDK usage
#
# the output of populate_sdk_<image> should end up in ${SDK_OUTPUT} it is made
# up of:
# ${SDK_OUTPUT}/<sdk_arch-nativesdk pkgs>
# ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/<target pkgs>
populate_sdk_${IMAGE_PKGTYPE}
# Don't ship any libGL in the SDK
rm -rf ${SDK_OUTPUT}/${SDKPATHNATIVE}${libdir_nativesdk}/libGL*
# Can copy pstage files here
# target_pkgs=`cat ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/var/lib/opkg/status | grep Package: | cut -f 2 -d ' '`
# Fix or remove broken .la files
#rm -f ${SDK_OUTPUT}/${SDKPATHNATIVE}/lib/*.la
rm -f ${SDK_OUTPUT}/${SDKPATHNATIVE}${libdir_nativesdk}/*.la
# Link the ld.so.cache file into the hosts filesystem
ln -s /etc/ld.so.cache ${SDK_OUTPUT}/${SDKPATHNATIVE}/etc/ld.so.cache
# Setup site file for external use
toolchain_create_sdk_siteconfig ${SDK_OUTPUT}/${SDKPATH}/site-config-${MULTIMACH_TARGET_SYS}
toolchain_create_sdk_env_script
# Add version information
toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${MULTIMACH_TARGET_SYS}
# Package it up
mkdir -p ${SDK_DEPLOY}
cd ${SDK_OUTPUT}
tar --owner=root --group=root -cj --file=${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 .
}
populate_sdk_log_check() {
for target in $*
do
lf_path="${WORKDIR}/temp/log.do_$target.${PID}"
echo "log_check: Using $lf_path as logfile"
if test -e "$lf_path"
then
${IMAGE_PKGTYPE}_log_check $target $lf_path
else
echo "Cannot find logfile [$lf_path]"
fi
echo "Logfile is clean"
done
}
do_populate_sdk[nostamp] = "1"
do_populate_sdk[recrdeptask] = "do_package_write"
addtask populate_sdk before do_build after do_install

View File

@@ -0,0 +1,61 @@
do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
do_populate_sdk[recrdeptask] += "do_package_write_deb"
DEB_SDK_ARCH = "${@[d.getVar('SDK_ARCH', 1), "i386"]\
[d.getVar('SDK_ARCH', 1) in \
["x86", "i486", "i586", "i686", "pentium"]]}"
populate_sdk_post_deb () {
local target_rootfs=$1
tar -cf - -C ${STAGING_ETCDIR_NATIVE} -ps apt | tar -xf - -C ${target_rootfs}/etc
}
populate_sdk_deb () {
# update index
package_update_index_deb
## install target ##
# This needs to work in the same way as rootfs_deb.bbclass
echo "Installing TARGET packages"
mkdir -p ${IMAGE_ROOTFS}/var/dpkg/alternatives
export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
export INSTALL_PACKAGES_DEB="${TOOLCHAIN_TARGET_TASK}"
export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
export PACKAGES_LINGUAS_DEB=""
export INSTALL_TASK_DEB="populate_sdk-target"
package_install_internal_deb
populate_sdk_post_deb ${INSTALL_ROOTFS_DEB}
populate_sdk_log_check populate_sdk
## install nativesdk ##
echo "Installing NATIVESDK packages"
export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}"
export INSTALL_BASEARCH_DEB="${DEB_SDK_ARCH}"
export INSTALL_ARCHS_DEB="${SDK_PACKAGE_ARCHS}"
export INSTALL_PACKAGES_DEB="${TOOLCHAIN_HOST_TASK}"
export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
export PACKAGES_LINGUAS_DEB=""
export INSTALL_TASK_DEB="populate_sdk-nativesdk"
package_install_internal_deb
populate_sdk_post_deb ${SDK_OUTPUT}/${SDKPATHNATIVE}
# move remaining dpkg state under the native sysroot
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/var/lib/dpkg
mv ${SDK_OUTPUT}/var/lib/dpkg/* ${SDK_OUTPUT}/${SDKPATHNATIVE}/var/lib/dpkg
rm -rf ${SDK_OUTPUT}/var
populate_sdk_log_check populate_sdk
}

View File

@@ -0,0 +1,49 @@
do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
do_populate_sdk[recrdeptask] += "do_package_write_ipk"
populate_sdk_ipk() {
rm -f ${IPKGCONF_TARGET}
touch ${IPKGCONF_TARGET}
rm -f ${IPKGCONF_SDK}
touch ${IPKGCONF_SDK}
package_update_index_ipk
package_generate_ipkg_conf
export INSTALL_PACKAGES_ATTEMPTONLY_IPK=""
export INSTALL_PACKAGES_LINGUAS_IPK=""
export INSTALL_TASK_IPK="populate_sdk"
#install target
export INSTALL_ROOTFS_IPK="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
export INSTALL_CONF_IPK="${IPKGCONF_TARGET}"
export INSTALL_PACKAGES_IPK="${TOOLCHAIN_TARGET_TASK}"
export D=${INSTALL_ROOTFS_IPK}
export OFFLINE_ROOT=${INSTALL_ROOTFS_IPK}
export IPKG_OFFLINE_ROOT=${INSTALL_ROOTFS_IPK}
export OPKG_OFFLINE_ROOT=${IPKG_OFFLINE_ROOT}
package_install_internal_ipk
#install host
export INSTALL_ROOTFS_IPK="${SDK_OUTPUT}"
export INSTALL_CONF_IPK="${IPKGCONF_SDK}"
export INSTALL_PACKAGES_IPK="${TOOLCHAIN_HOST_TASK}"
package_install_internal_ipk
#post clean up
install -d ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/${sysconfdir}
install -m 0644 ${IPKGCONF_TARGET} ${IPKGCONF_SDK} ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/${sysconfdir}/
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}
install -m 0644 ${IPKGCONF_SDK} ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}/
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/opkg
mv ${SDK_OUTPUT}/var/lib/opkg/* ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/opkg/
rm -Rf ${SDK_OUTPUT}/var
populate_sdk_log_check populate_sdk
}

View File

@@ -0,0 +1,132 @@
do_populate_sdk[depends] += "rpm-native:do_populate_sysroot"
do_populate_sdk[recrdeptask] += "do_package_write_rpm"
rpmlibdir = "/var/lib/rpm"
RPMOPTS="--dbpath ${rpmlibdir} --define='_openall_before_chroot 1'"
RPM="rpm ${RPMOPTS}"
do_populate_sdk[lockfiles] += "${DEPLOY_DIR_RPM}/rpm.lock"
populate_sdk_post_rpm () {
local target_rootfs=$1
# remove lock files
rm -f ${target_rootfs}/__db.*
# Move manifests into the directory with the logs
mv ${target_rootfs}/install/*.manifest ${T}/
# Remove all remaining resolver files
rm -rf ${target_rootfs}/install
}
populate_sdk_rpm () {
package_update_index_rpm
package_generate_rpm_conf
## install target ##
# This needs to work in the same way as rootfs_rpm.bbclass!
#
export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
export INSTALL_PLATFORM_RPM="${TARGET_ARCH}"
export INSTALL_CONFBASE_RPM="${RPMCONF_TARGET_BASE}"
export INSTALL_PACKAGES_RPM="${TOOLCHAIN_TARGET_TASK}"
export INSTALL_PACKAGES_ATTEMPTONLY_RPM=""
export INSTALL_PACKAGES_LINGUAS_RPM=""
export INSTALL_PROVIDENAME_RPM="/bin/sh /bin/bash /usr/bin/env /usr/bin/perl pkgconfig pkgconfig(pkg-config)"
export INSTALL_TASK_RPM="populate_sdk-target"
# Setup base system configuration
mkdir -p ${INSTALL_ROOTFS_RPM}/etc/rpm/
mkdir -p ${INSTALL_ROOTFS_RPM}${rpmlibdir}
mkdir -p ${INSTALL_ROOTFS_RPM}${rpmlibdir}/log
cat > ${INSTALL_ROOTFS_RPM}${rpmlibdir}/DB_CONFIG << EOF
# ================ Environment
set_data_dir .
set_create_dir .
set_lg_dir ./log
set_tmp_dir ./tmp
# -- thread_count must be >= 8
set_thread_count 64
# ================ Logging
# ================ Memory Pool
set_mp_mmapsize 268435456
# ================ Locking
set_lk_max_locks 16384
set_lk_max_lockers 16384
set_lk_max_objects 16384
mutex_set_max 163840
# ================ Replication
EOF
# List must be ordered from most preferred to least preferred
INSTALL_PLATFORM_EXTRA_RPM=""
for each_arch in ${MULTILIB_PACKAGE_ARCHS} ${PACKAGE_ARCHS} ; do
INSTALL_PLATFORM_EXTRA_RPM="$each_arch $INSTALL_PLATFORM_EXTRA_RPM"
done
export INSTALL_PLATFORM_EXTRA_RPM
package_install_internal_rpm
populate_sdk_post_rpm ${INSTALL_ROOTFS_RPM}
## install nativesdk ##
echo "Installing NATIVESDK packages"
export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}"
export INSTALL_PLATFORM_RPM="${SDK_ARCH}"
export INSTALL_CONFBASE_RPM="${RPMCONF_HOST_BASE}"
export INSTALL_PACKAGES_RPM="${TOOLCHAIN_HOST_TASK}"
export INSTALL_PACKAGES_ATTEMPTONLY_RPM=""
export INSTALL_PACKAGES_LINGUAS_RPM=""
export INSTALL_PROVIDENAME_RPM="/bin/sh /bin/bash /usr/bin/env /usr/bin/perl pkgconfig libGL.so()(64bit) libGL.so"
export INSTALL_TASK_RPM="populate_sdk_rpm-nativesdk"
# List must be ordered from most preferred to least preferred
INSTALL_PLATFORM_EXTRA_RPM=""
for each_arch in ${SDK_PACKAGE_ARCHS} ; do
INSTALL_PLATFORM_EXTRA_RPM="$each_arch $INSTALL_PLATFORM_EXTRA_RPM"
done
export INSTALL_PLATFORM_EXTRA_RPM
package_install_internal_rpm
populate_sdk_post_rpm ${INSTALL_ROOTFS_RPM}
# move host RPM library data
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/rpm
mv ${SDK_OUTPUT}${rpmlibdir}/* ${SDK_OUTPUT}/${SDKPATHNATIVE}${localstatedir_nativesdk}/lib/rpm/
rm -Rf ${SDK_OUTPUT}/var
install -d ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}
mv ${SDK_OUTPUT}/etc/* ${SDK_OUTPUT}/${SDKPATHNATIVE}/${sysconfdir}/
rm -rf ${SDK_OUTPUT}/etc
populate_sdk_log_check populate_sdk
# Workaround so the parser knows we need the resolve_package function!
if false ; then
resolve_package_rpm foo ${RPMCONF_TARGET_BASE}.conf || true
fi
}
python () {
ml_package_archs = ""
multilibs = d.getVar('MULTILIBS', True) or ""
for ext in multilibs.split():
eext = ext.split(':')
if len(eext) > 1 and eext[0] == 'multilib':
localdata = bb.data.createCopy(d)
overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + eext[1]
localdata.setVar("OVERRIDES", overrides)
# TEMP: OVERRIDES isn't working right
localdata.setVar("DEFAULTTUNE", localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + eext[1], False) or "")
ml_package_archs += localdata.getVar("PACKAGE_ARCHS", True) or ""
#bb.note("ML_PACKAGE_ARCHS %s %s %s" % (eext[1], localdata.getVar("PACKAGE_ARCHS", True) or "(none)", overrides))
d.setVar('MULTILIB_PACKAGE_ARCHS', ml_package_archs)
}
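# Editor's note: an illustrative configuration for the anonymous python above
# (names hypothetical) -- each "multilib:<name>" entry in MULTILIBS pulls that
# variant's PACKAGE_ARCHS into MULTILIB_PACKAGE_ARCHS for the preferred-arch
# list built in populate_sdk_rpm():
#   MULTILIBS = "multilib:lib32"
#   DEFAULTTUNE_virtclass-multilib-lib32 = "x86"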

View File

@@ -0,0 +1,45 @@
PRSERV_DUMPOPT_VERSION = "${PRAUTOINX}"
PRSERV_DUMPOPT_PKGARCH = ""
PRSERV_DUMPOPT_CHECKSUM = ""
PRSERV_DUMPOPT_COL = "0"
PRSERV_DUMPDIR ??= "${LOG_DIR}/db"
PRSERV_DUMPFILE ??= "${PRSERV_DUMPDIR}/prserv.inc"
python prexport_handler () {
import bb.event
if not e.data:
return
if isinstance(e, bb.event.RecipeParsed):
import oe.prservice
#get all PR values for the current PRAUTOINX
ver = e.data.getVar('PRSERV_DUMPOPT_VERSION', True)
ver = ver.replace('%','-')
retval = oe.prservice.prserv_dump_db(e.data)
if not retval:
bb.fatal("prexport_handler: export failed!")
(metainfo, datainfo) = retval
if not datainfo:
bb.error("prexport_handler: No AUROPR values found for %s" % ver)
return
oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
elif isinstance(e, bb.event.ParseStarted):
import bb.utils
#remove dumpfile
bb.utils.remove(e.data.getVar('PRSERV_DUMPFILE', True))
elif isinstance(e, bb.event.ParseCompleted):
import oe.prservice
#dump meta info of tables
d = e.data.createCopy()
d.setVar('PRSERV_DUMPOPT_COL', "1")
retval = oe.prservice.prserv_dump_db(d)
if not retval:
bb.error("prexport_handler: export failed!")
return
(metainfo, datainfo) = retval
oe.prservice.prserv_export_tofile(d, metainfo, None, True)
}
addhandler prexport_handler

View File

@@ -0,0 +1,17 @@
python primport_handler () {
import bb.event
if not e.data:
return
if isinstance(e, bb.event.ParseCompleted):
import oe.prservice
#import all exported AUTOPR values
imported = oe.prservice.prserv_import_db(e.data)
if imported is None:
bb.fatal("import failed!")
for (version, pkgarch, checksum, value) in imported:
bb.note("imported (%s,%s,%s,%d)" % (version, pkgarch, checksum, value))
}
addhandler primport_handler

View File

@@ -0,0 +1,21 @@
def prserv_get_pr_auto(d):
import oe.prservice
if d.getVar('USE_PR_SERV', True) != "1":
bb.warn("Not using network based PR service")
return None
version = d.getVar("PRAUTOINX", True)
pkgarch = d.getVar("PACKAGE_ARCH", True)
checksum = d.getVar("BB_TASKHASH", True)
if d.getVar('PRSERV_LOCKDOWN', True):
auto_rev = d.getVar('PRAUTO_' + version + '_' + pkgarch, True) or d.getVar('PRAUTO_' + version, True) or None
else:
conn = d.getVar("__PRSERV_CONN", True)
if conn is None:
conn = oe.prservice.prserv_make_conn(d)
if conn is None:
return None
auto_rev = conn.getPR(version, pkgarch, checksum)
return auto_rev
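# Editor's note: a hypothetical call site -- a recipe or class using a PR
# service would resolve AUTOPR through this helper, e.g.:
#   auto_rev = prserv_get_pr_auto(d)
#   if auto_rev is not None:
#       d.setVar('PRAUTO', str(auto_rev))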

View File

@@ -0,0 +1,3 @@
PYTHON_BASEVERSION ?= "2.7"
PYTHON_DIR = "python${PYTHON_BASEVERSION}"
PYTHON_SITEPACKAGES_DIR = "${libdir}/${PYTHON_DIR}/site-packages"

View File

@@ -0,0 +1,15 @@
#
# This class contains functions for recipes that need QEMU or test for its
# existence.
#
def qemu_target_binary(data):
import bb
target_arch = data.getVar("TARGET_ARCH", 1)
if target_arch in ("i486", "i586", "i686"):
target_arch = "i386"
elif target_arch == "powerpc":
target_arch = "ppc"
return "qemu-" + target_arch

View File

@@ -0,0 +1,24 @@
#
# QMake variables for Qt4
#
inherit qmake_base
DEPENDS_prepend = "qt4-tools-native "
export QMAKESPEC = "${STAGING_DATADIR}/qt4/mkspecs/${TARGET_OS}-oe-g++"
export OE_QMAKE_UIC = "${STAGING_BINDIR_NATIVE}/uic4"
export OE_QMAKE_UIC3 = "${STAGING_BINDIR_NATIVE}/uic34"
export OE_QMAKE_MOC = "${STAGING_BINDIR_NATIVE}/moc4"
export OE_QMAKE_RCC = "${STAGING_BINDIR_NATIVE}/rcc4"
export OE_QMAKE_QDBUSCPP2XML = "${STAGING_BINDIR_NATIVE}/qdbuscpp2xml4"
export OE_QMAKE_QDBUSXML2CPP = "${STAGING_BINDIR_NATIVE}/qdbusxml2cpp4"
export OE_QMAKE_QMAKE = "${STAGING_BINDIR_NATIVE}/qmake2"
export OE_QMAKE_LINK = "${CXX}"
export OE_QMAKE_CXXFLAGS = "${CXXFLAGS}"
export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/qt4"
export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
export OE_QMAKE_LIBS_QT = "qt"
export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
export OE_QMAKE_LIBS_X11SM = "-lSM -lICE"
export OE_QMAKE_LRELEASE = "${STAGING_BINDIR_NATIVE}/lrelease4"
export OE_QMAKE_LUPDATE = "${STAGING_BINDIR_NATIVE}/lupdate4"

View File

@@ -0,0 +1,108 @@
QMAKE_MKSPEC_PATH ?= "${STAGING_DATADIR_NATIVE}/qmake"
OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++"
QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
# We override this completely to eliminate the -e normally passed in
EXTRA_OEMAKE = ' MAKEFLAGS= '
export OE_QMAKE_CC="${CC}"
export OE_QMAKE_CFLAGS="${CFLAGS}"
export OE_QMAKE_CXX="${CXX}"
export OE_QMAKE_LDFLAGS="${LDFLAGS}"
export OE_QMAKE_AR="${AR}"
export OE_QMAKE_STRIP="echo"
export OE_QMAKE_RPATH="-Wl,-rpath-link,"
# default to qte2 via bb.conf, inherit qt3x11 to configure for qt3x11
oe_qmake_mkspecs () {
mkdir -p mkspecs/${OE_QMAKE_PLATFORM}
for f in ${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}/*; do
if [ -L $f ]; then
lnk=`readlink $f`
if [ -f mkspecs/${OE_QMAKE_PLATFORM}/$lnk ]; then
ln -s $lnk mkspecs/${OE_QMAKE_PLATFORM}/`basename $f`
else
cp $f mkspecs/${OE_QMAKE_PLATFORM}/
fi
else
cp $f mkspecs/${OE_QMAKE_PLATFORM}/
fi
done
}
do_generate_qt_config_file() {
export QT_CONF_PATH=${WORKDIR}/qt.conf
cat > ${WORKDIR}/qt.conf <<EOF
[Paths]
Prefix =
Binaries = ${STAGING_BINDIR_NATIVE}
Headers = ${STAGING_INCDIR}/qt4
Plugins = ${STAGING_LIBDIR}/qt4/plugins/
Mkspecs = ${STAGING_DATADIR}/qt4/mkspecs/
EOF
}
addtask generate_qt_config_file after do_patch before do_configure
qmake_base_do_configure() {
case ${QMAKESPEC} in
*linux-oe-g++|*linux-uclibc-oe-g++|*linux-gnueabi-oe-g++|*linux-uclibceabi-oe-g++)
;;
*-oe-g++)
die Unsupported target ${TARGET_OS} for oe-g++ qmake spec
;;
*)
bbnote Searching for qmake spec file
paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-${TARGET_ARCH}-g++"
paths="${QMAKE_MKSPEC_PATH}/${TARGET_OS}-g++ $paths"
if (echo "${TARGET_ARCH}"|grep -q 'i.86'); then
paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-x86-g++ $paths"
fi
for i in $paths; do
if test -e $i; then
export QMAKESPEC=$i
break
fi
done
;;
esac
bbnote "using qmake spec in ${QMAKESPEC}, using profiles '${QMAKE_PROFILES}'"
if [ -z "${QMAKE_PROFILES}" ]; then
PROFILES="`ls *.pro`"
else
PROFILES="${QMAKE_PROFILES}"
fi
if [ -z "$PROFILES" ]; then
die "QMAKE_PROFILES not set and no profiles found in $PWD"
fi
if [ ! -z "${EXTRA_QMAKEVARS_POST}" ]; then
AFTER="-after"
QMAKE_VARSUBST_POST="${EXTRA_QMAKEVARS_POST}"
bbnote "qmake postvar substitution: ${EXTRA_QMAKEVARS_POST}"
fi
if [ ! -z "${EXTRA_QMAKEVARS_PRE}" ]; then
QMAKE_VARSUBST_PRE="${EXTRA_QMAKEVARS_PRE}"
bbnote "qmake prevar substitution: ${EXTRA_QMAKEVARS_PRE}"
fi
# Hack .pro files to use OE utilities
find -name '*.pro' \
-exec sed -i -e 's,=\s*.*/lrelease,= ${OE_QMAKE_LRELEASE},g' \
-e 's,=\s*.*/lupdate,= ${OE_QMAKE_LUPDATE},g' '{}' ';'
#bbnote "Calling '${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST'"
unset QMAKESPEC || true
${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST || die "Error calling ${OE_QMAKE_QMAKE} on $PROFILES"
}
EXPORT_FUNCTIONS do_configure
addtask configure after do_unpack do_patch before do_compile
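# Editor's note: a hypothetical recipe fragment driving the configure step
# above -- EXTRA_QMAKEVARS_PRE is passed before the .pro files and
# EXTRA_QMAKEVARS_POST after them (behind -after):
#   QMAKE_PROFILES = "myapp.pro"
#   EXTRA_QMAKEVARS_PRE += "CONFIG+=release"
#   EXTRA_QMAKEVARS_POST += "LIBS+=-lcustom"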

View File

@@ -0,0 +1,19 @@
DEPENDS_prepend = "${@["qt4-embedded ", ""][(d.getVar('PN', 1)[:12] == 'qt4-embedded')]}"
inherit qmake2
QT_BASE_NAME = "qt4-embedded"
QT_DIR_NAME = "qtopia"
QT_LIBINFIX = "E"
# override variables set by qmake-base to compile Qt/Embedded apps
#
export QMAKESPEC = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/${TARGET_OS}-oe-g++"
export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/${QT_DIR_NAME}"
export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
export OE_QMAKE_LIBS_QT = "qt"
export OE_QMAKE_LIBS_X11 = ""
export OE_QMAKE_EXTRA_MODULES = "network"
EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
# Qt4 uses atomic instructions not supported in thumb mode
ARM_INSTRUCTION_SET = "arm"

View File

@@ -0,0 +1,10 @@
DEPENDS_prepend = "${@["qt4-x11-free ", ""][(d.getVar('BPN', True)[:12] == 'qt4-x11-free')]}"
inherit qmake2
QT_BASE_NAME = "qt4"
QT_DIR_NAME = "qt4"
QT_LIBINFIX = ""
# Qt4 uses atomic instructions not supported in thumb mode
ARM_INSTRUCTION_SET = "arm"

View File

@@ -0,0 +1,179 @@
def __note(msg, d):
bb.note("%s: recipe_sanity: %s" % (d.getVar("P", 1), msg))
__recipe_sanity_badruntimevars = "RDEPENDS RPROVIDES RRECOMMENDS RCONFLICTS"
def bad_runtime_vars(cfgdata, d):
if bb.data.inherits_class("native", d) or \
bb.data.inherits_class("cross", d):
return
for var in d.getVar("__recipe_sanity_badruntimevars", 1).split():
val = d.getVar(var, 0)
if val and val != cfgdata.get(var):
__note("%s should be %s_${PN}" % (var, var), d)
__recipe_sanity_reqvars = "DESCRIPTION"
__recipe_sanity_reqdiffvars = "LICENSE"
def req_vars(cfgdata, d):
for var in d.getVar("__recipe_sanity_reqvars", 1).split():
if not d.getVar(var, 0):
__note("%s should be set" % var, d)
for var in d.getVar("__recipe_sanity_reqdiffvars", 1).split():
val = d.getVar(var, 0)
cfgval = cfgdata.get(var)
# Hardcoding is bad, but I'm lazy. We don't care about license being
# unset if the recipe has no sources!
if var == "LICENSE" and d.getVar("SRC_URI", 1) == cfgdata.get("SRC_URI"):
continue
if not val:
__note("%s should be set" % var, d)
elif val == cfgval:
__note("%s should be defined to something other than default (%s)" % (var, cfgval), d)
def var_renames_overwrite(cfgdata, d):
renames = d.getVar("__recipe_sanity_renames", 0)
if renames:
for (key, newkey, oldvalue, newvalue) in renames:
if oldvalue != newvalue and oldvalue != cfgdata.get(newkey):
__note("rename of variable '%s' to '%s' overwrote existing value '%s' with '%s'." % (key, newkey, oldvalue, newvalue), d)
def incorrect_nonempty_PACKAGES(cfgdata, d):
if bb.data.inherits_class("native", d) or \
bb.data.inherits_class("cross", d):
if d.getVar("PACKAGES", 1):
return True
def can_use_autotools_base(cfgdata, d):
cfg = d.getVar("do_configure", 1)
if not bb.data.inherits_class("autotools", d):
return False
for i in ["autoreconf"] + ["%s_do_configure" % cls for cls in ["gnomebase", "gnome", "e", "autotools", "efl", "gpephone", "openmoko", "openmoko2", "xfce", "xlibs"]]:
if cfg.find(i) != -1:
return False
import os
for clsfile in d.getVar("__inherit_cache", 0):
(base, _) = os.path.splitext(os.path.basename(clsfile))
if cfg.find("%s_do_configure" % base) != -1:
__note("autotools_base usage needs verification, spotted %s_do_configure" % base, d)
return True
def can_remove_FILESPATH(cfgdata, d):
expected = cfgdata.get("FILESPATH")
#expected = "${@':'.join([os.path.normpath(os.path.join(fp, p, o)) for fp in d.getVar('FILESPATHBASE', 1).split(':') for p in d.getVar('FILESPATHPKG', 1).split(':') for o in (d.getVar('OVERRIDES', 1) + ':').split(':') if os.path.exists(os.path.join(fp, p, o))])}:${FILESDIR}"
expectedpaths = bb.data.expand(expected, d)
unexpanded = d.getVar("FILESPATH", 0)
filespath = d.getVar("FILESPATH", 1).split(":")
filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
for fp in filespath:
if not fp in expectedpaths:
# __note("Path %s in FILESPATH not in the expected paths %s" %
# (fp, expectedpaths), d)
return False
return expected != unexpanded
def can_remove_FILESDIR(cfgdata, d):
expected = cfgdata.get("FILESDIR")
#expected = "${@bb.which(d.getVar('FILESPATH', 1), '.')}"
unexpanded = d.getVar("FILESDIR", 0)
if unexpanded is None:
return False
expanded = os.path.normpath(d.getVar("FILESDIR", 1))
filespath = d.getVar("FILESPATH", 1).split(":")
filespath = [os.path.normpath(f) for f in filespath if os.path.exists(f)]
return unexpanded != expected and \
os.path.exists(expanded) and \
(expanded in filespath or
expanded == bb.data.expand(expected, d))
def can_remove_others(p, cfgdata, d):
for k in ["S", "PV", "PN", "DESCRIPTION", "LICENSE", "DEPENDS",
"SECTION", "PACKAGES", "EXTRA_OECONF", "EXTRA_OEMAKE"]:
#for k in cfgdata:
unexpanded = d.getVar(k, 0)
cfgunexpanded = cfgdata.get(k)
if not cfgunexpanded:
continue
try:
expanded = d.getVar(k, 1)
cfgexpanded = bb.data.expand(cfgunexpanded, d)
except bb.fetch.ParameterError:
continue
if unexpanded != cfgunexpanded and \
cfgexpanded == expanded:
__note("candidate for removal of %s" % k, d)
bb.debug(1, "%s: recipe_sanity: cfg's '%s' and d's '%s' both expand to %s" %
(p, cfgunexpanded, unexpanded, expanded))
python do_recipe_sanity () {
p = d.getVar("P", 1)
p = "%s %s %s" % (d.getVar("PN", 1), d.getVar("PV", 1), d.getVar("PR", 1))
sanitychecks = [
(can_remove_FILESDIR, "candidate for removal of FILESDIR"),
(can_remove_FILESPATH, "candidate for removal of FILESPATH"),
#(can_use_autotools_base, "candidate for use of autotools_base"),
(incorrect_nonempty_PACKAGES, "native or cross recipe with non-empty PACKAGES"),
]
cfgdata = d.getVar("__recipe_sanity_cfgdata", 0)
for (func, msg) in sanitychecks:
if func(cfgdata, d):
__note(msg, d)
can_remove_others(p, cfgdata, d)
var_renames_overwrite(cfgdata, d)
req_vars(cfgdata, d)
bad_runtime_vars(cfgdata, d)
}
do_recipe_sanity[nostamp] = "1"
#do_recipe_sanity[recrdeptask] = "do_recipe_sanity"
addtask recipe_sanity
do_recipe_sanity_all[nostamp] = "1"
do_recipe_sanity_all[recrdeptask] = "do_recipe_sanity"
do_recipe_sanity_all () {
:
}
addtask recipe_sanity_all after do_recipe_sanity
python recipe_sanity_eh () {
from bb.event import getName
if getName(e) != "ConfigParsed":
return
d = e.data
cfgdata = {}
for k in d.keys():
#for k in ["S", "PR", "PV", "PN", "DESCRIPTION", "LICENSE", "DEPENDS",
# "SECTION"]:
cfgdata[k] = d.getVar(k, 0)
d.setVar("__recipe_sanity_cfgdata", cfgdata)
#d.setVar("__recipe_sanity_cfgdata", d)
# Sick, very sick..
from bb.data_smart import DataSmart
old = DataSmart.renameVar
def myrename(self, key, newkey):
oldvalue = self.getVar(newkey, 0)
old(self, key, newkey)
newvalue = self.getVar(newkey, 0)
if oldvalue:
renames = self.getVar("__recipe_sanity_renames", 0) or set()
renames.add((key, newkey, oldvalue, newvalue))
self.setVar("__recipe_sanity_renames", renames)
DataSmart.renameVar = myrename
}
addhandler recipe_sanity_eh

View File

@@ -0,0 +1,94 @@
SYSROOT_PREPROCESS_FUNCS += "relocatable_binaries_preprocess"
CHRPATH_BIN ?= "chrpath"
PREPROCESS_RELOCATE_DIRS ?= ""
def process_dir (directory, d):
import subprocess as sub
import stat
cmd = bb.data.expand('${CHRPATH_BIN}', d)
tmpdir = d.getVar('TMPDIR')
basedir = bb.data.expand('${base_prefix}', d)
#bb.debug("Checking %s for binaries to process" % directory)
if not os.path.exists(directory):
return
dirs = os.listdir(directory)
for file in dirs:
fpath = directory + "/" + file
fpath = os.path.normpath(fpath)
if os.path.islink(fpath):
# Skip symlinks
continue
if os.path.isdir(fpath):
process_dir(fpath, d)
else:
#bb.note("Testing %s for relocatability" % fpath)
# We need read and write permissions for chrpath, if we don't have
# them then set them temporarily. Take a copy of the files
# permissions so that we can restore them afterwards.
perms = os.stat(fpath)[stat.ST_MODE]
if os.access(fpath, os.W_OK|os.R_OK):
perms = None
else:
# Temporarily make the file writeable so we can chrpath it
os.chmod(fpath, perms|stat.S_IRWXU)
p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
err, out = p.communicate()
# If returned successfully, process stderr for results
if p.returncode != 0:
continue
# Throw away everything other than the rpath list
curr_rpath = err.partition("RPATH=")[2]
#bb.note("Current rpath for %s is %s" % (fpath, curr_rpath.strip()))
rpaths = curr_rpath.split(":")
new_rpaths = []
for rpath in rpaths:
# If rpath is already dynamic continue
if rpath.find("$ORIGIN") != -1:
continue
# If the rpath shares a root with base_prefix determine a new dynamic rpath from the
# base_prefix shared root
if rpath.find(basedir) != -1:
depth = fpath.partition(basedir)[2].count('/')
libpath = rpath.partition(basedir)[2].strip()
# otherwise (i.e. cross packages) determine a shared root based on the TMPDIR
# NOTE: This will not work reliably for cross packages, particularly in the case
# where your TMPDIR is a short path (i.e. /usr/poky) as chrpath cannot insert an
# rpath longer than that which is already set.
else:
depth = fpath.rpartition(tmpdir)[2].count('/')
libpath = rpath.partition(tmpdir)[2].strip()
base = "$ORIGIN"
while depth > 1:
base += "/.."
depth-=1
new_rpaths.append("%s%s" % (base, libpath))
# if we have modified some rpaths call chrpath to update the binary
if len(new_rpaths):
args = ":".join(new_rpaths)
#bb.note("Setting rpath for %s to %s" %(fpath, args))
sub.call([cmd, '-r', args, fpath])
if perms:
os.chmod(fpath, perms)
def rpath_replace (path, d):
bindirs = bb.data.expand("${bindir} ${sbindir} ${base_sbindir} ${base_bindir} ${libdir} ${base_libdir} ${libexecdir} ${PREPROCESS_RELOCATE_DIRS}", d).split()
for bindir in bindirs:
#bb.note ("Processing directory " + bindir)
directory = path + "/" + bindir
process_dir (directory, d)
python relocatable_binaries_preprocess() {
rpath_replace(bb.data.expand('${SYSROOT_DESTDIR}', d), d)
}
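# Editor's note: an illustrative rewrite performed by process_dir() above --
# for a staged binary at <destdir>/usr/bin/foo carrying an absolute
#   RPATH=<destdir>/usr/lib
# the computed replacement is relative to the binary itself:
#   RPATH=$ORIGIN/../../usr/lib
# which is what keeps the staged sysroot contents relocatable.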

View File

@@ -0,0 +1,77 @@
#
# Removes source after build
#
# To use it add that line to conf/local.conf:
#
# INHERIT += "rm_work"
#
# Use the completion scheduler by default when rm_work is active
# to try and reduce disk usage
BB_SCHEDULER ?= "completion"
RMWORK_ORIG_TASK := "${BB_DEFAULT_TASK}"
BB_DEFAULT_TASK = "rm_work_all"
do_rm_work () {
cd ${WORKDIR}
for dir in *
do
if [ `basename ${S}` = $dir ]; then
rm -rf $dir
# The package and packages-split directories are retained by sstate for
# do_package so we retain them here too. Anything in sstate 'plaindirs'
# should be retained. Also retain logs and other files in temp.
elif [ $dir != 'temp' ] && [ $dir != 'package' ] && [ $dir != 'packages-split' ]; then
rm -rf $dir
fi
done
# Need to add pseudo back or subsequent work in this workdir
# might fail since setscene may not rerun to recreate it
mkdir ${WORKDIR}/pseudo/
# Change normal stamps into setscene stamps as they better reflect the
# fact WORKDIR is now empty
# Also leave noexec stamps since setscene stamps don't cover them
cd `dirname ${STAMP}`
for i in `basename ${STAMP}`*
do
for j in ${SSTATETASKS}
do
case $i in
*do_setscene*)
break
;;
*sigdata*)
i=dummy
break
;;
*do_package_write*)
i=dummy
break
;;
*do_build*)
i=dummy
break
;;
*_setscene*)
i=dummy
break
;;
*$j|*$j.*)
mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
i=dummy
break
;;
esac
done
rm -f $i
done
}
addtask rm_work after do_${RMWORK_ORIG_TASK}
do_rm_work_all () {
:
}
do_rm_work_all[recrdeptask] = "do_rm_work"
addtask rm_work_all after do_rm_work

View File

@@ -0,0 +1,123 @@
#
# Copyright 2006-2007 Openedhand Ltd.
#
ROOTFS_PKGMANAGE = "run-postinsts dpkg apt"
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
do_rootfs[recrdeptask] += "do_package_write_deb"
DEB_POSTPROCESS_COMMANDS = "rootfs_install_all_locales; "
opkglibdir = "${localstatedir}/lib/opkg"
deb_package_setflag() {
sed -i -e "/^Package: $2\$/{n; s/Status: install ok .*/Status: install ok $1/;}" ${IMAGE_ROOTFS}/var/lib/dpkg/status
}
deb_package_getflag() {
cat ${IMAGE_ROOTFS}/var/lib/dpkg/status | sed -n -e "/^Package: $2\$/{n; s/Status: install ok .*/$1/; p}"
}
fakeroot rootfs_deb_do_rootfs () {
set +e
mkdir -p ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
# update index
package_update_index_deb
#install packages
export INSTALL_ROOTFS_DEB="${IMAGE_ROOTFS}"
export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
export INSTALL_PACKAGES_NORMAL_DEB="${PACKAGE_INSTALL}"
export INSTALL_PACKAGES_ATTEMPTONLY_DEB="${PACKAGE_INSTALL_ATTEMPTONLY}"
export INSTALL_PACKAGES_LINGUAS_DEB="${LINGUAS_INSTALL}"
export INSTALL_TASK_DEB="rootfs"
package_install_internal_deb
${DEB_POSTPROCESS_COMMANDS}
export D=${IMAGE_ROOTFS}
export OFFLINE_ROOT=${IMAGE_ROOTFS}
export IPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
export OPKG_OFFLINE_ROOT=${IMAGE_ROOTFS}
# Attempt to run preinsts
# Mark packages with preinst failures as unpacked
for i in ${IMAGE_ROOTFS}/var/lib/dpkg/info/*.preinst; do
if [ -f $i ] && ! sh $i; then
deb_package_setflag unpacked `basename $i .preinst`
fi
done
# Attempt to run postinsts
# Mark packages with postinst failures as unpacked
for i in ${IMAGE_ROOTFS}/var/lib/dpkg/info/*.postinst; do
if [ -f $i ] && ! sh $i configure; then
deb_package_setflag unpacked `basename $i .postinst`
fi
done
set -e
install -d ${IMAGE_ROOTFS}/${sysconfdir}
echo ${BUILDNAME} > ${IMAGE_ROOTFS}/${sysconfdir}/version
# Hacks to allow opkg's update-alternatives and opkg to coexist for now
mkdir -p ${IMAGE_ROOTFS}${opkglibdir}
if [ -e ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives ]; then
rmdir ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
fi
ln -s ${opkglibdir}/alternatives ${IMAGE_ROOTFS}/var/lib/dpkg/alternatives
ln -s /var/lib/dpkg/info ${IMAGE_ROOTFS}${opkglibdir}/info
ln -s /var/lib/dpkg/status ${IMAGE_ROOTFS}${opkglibdir}/status
${ROOTFS_POSTPROCESS_COMMAND}
log_check rootfs
}
remove_packaging_data_files() {
rm -rf ${IMAGE_ROOTFS}${opkglibdir}
rm -rf ${IMAGE_ROOTFS}/usr/dpkg/
}
DPKG_QUERY_COMMAND = "${STAGING_BINDIR_NATIVE}/dpkg --admindir=${IMAGE_ROOTFS}/var/lib/dpkg"
list_installed_packages() {
${DPKG_QUERY_COMMAND} -l | grep ^ii | awk '{ print $2 }'
}
get_package_filename() {
fullname=`find ${DEPLOY_DIR_DEB} -name "$1_*.deb" || true`
if [ "$fullname" = "" ] ; then
echo $1
else
echo $fullname
fi
}
list_package_depends() {
${DPKG_QUERY_COMMAND} -s $1 | grep ^Depends | sed -e 's/^Depends: //' -e 's/,//g' -e 's:([=<>]* [0-9a-zA-Z.~\-]*)::g'
}
list_package_recommends() {
${DPKG_QUERY_COMMAND} -s $1 | grep ^Recommends | sed -e 's/^Recommends: //' -e 's/,//g' -e 's:([=<>]* [0-9a-zA-Z.~\-]*)::g'
}
rootfs_check_package_exists() {
if [ `apt-cache showpkg $1 | wc -l` -gt 2 ]; then
echo $1
fi
}
rootfs_install_packages() {
${STAGING_BINDIR_NATIVE}/apt-get install $@ --force-yes --allow-unauthenticated
for pkg in $@ ; do
deb_package_setflag installed $pkg
done
}

Some files were not shown because too many files have changed in this diff