BB_DEFAULT_TASK ?= "build"
CLASSOVERRIDE ?= "class-target"

inherit patch
inherit staging

inherit mirrors
inherit utils
inherit utility-tasks
inherit metadata_scm
inherit logging

OE_EXTRA_IMPORTS ?= ""

OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
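
# A sketch of how a layer might extend the metadata's Python namespace via
# OE_EXTRA_IMPORTS (the module lib/mylayer/utils.py is hypothetical):
#
#   OE_EXTRA_IMPORTS += "mylayer.utils"
#
# After oe_import() runs, ${@mylayer.utils.some_helper(d)} becomes usable
# in recipe metadata, since lib/ directories on BBPATH are on sys.path.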

PACKAGECONFIG_CONFARGS ??= ""

def oe_import(d):
    import sys

    bbpath = d.getVar("BBPATH").split(":")
    sys.path[0:0] = [os.path.join(dir, "lib") for dir in bbpath]

    def inject(name, value):
        """Make a python object accessible from the metadata"""
        if hasattr(bb.utils, "_context"):
            bb.utils._context[name] = value
        else:
            __builtins__[name] = value

    import oe.data
    for toimport in oe.data.typed_value("OE_IMPORTS", d):
        try:
            imported = __import__(toimport)
            inject(toimport.split(".", 1)[0], imported)
        except AttributeError as e:
            bb.error("Error importing OE modules: %s" % str(e))
    return ""

# We need the oe module namespace early (before INHERITs get added)
OE_IMPORTED := "${@oe_import(d)}"

def lsb_distro_identifier(d):
    adjust = d.getVar('LSB_DISTRO_ADJUST')
    adjust_func = None
    if adjust:
        try:
            adjust_func = globals()[adjust]
        except KeyError:
            pass
    return oe.lsb.distro_identifier(adjust_func)

die() {
    bbfatal_log "$*"
}

oe_runmake_call() {
    bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
    ${MAKE} ${EXTRA_OEMAKE} "$@"
}

oe_runmake() {
    oe_runmake_call "$@" || die "oe_runmake failed"
}
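
# Illustrative recipe usage (task body and make target are hypothetical);
# ${MAKE} and ${EXTRA_OEMAKE} are supplied automatically by oe_runmake_call:
#
#   do_compile() {
#       oe_runmake -C ${S}/src all
#   }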


def base_dep_prepend(d):
    if d.getVar('INHIBIT_DEFAULT_DEPS', False):
        return ""
    return "${BASE_DEFAULT_DEPS}"

BASE_DEFAULT_DEPS = "virtual/${TARGET_PREFIX}gcc virtual/${TARGET_PREFIX}compilerlibs virtual/libc"
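
# Recipes that must not pull in the default toolchain dependencies (for
# example, components built before a cross-compiler exists) can opt out:
#
#   INHIBIT_DEFAULT_DEPS = "1"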

BASEDEPENDS = ""
BASEDEPENDS_class-target = "${@base_dep_prepend(d)}"
BASEDEPENDS_class-nativesdk = "${@base_dep_prepend(d)}"

DEPENDS_prepend="${BASEDEPENDS} "

FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with immediate expansion as it has to run
# in the context of the location it's used (:=)
THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
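
# FILESPATH search-order sketch: for a recipe recipes-foo/foo/foo_1.0.bb,
# file:// URIs are looked up under foo-1.0/ (${BP}), foo/ (${BPN}) and
# files/ beside the recipe, with override subdirectories (machine, distro,
# ...) tried first within each; directory names here are illustrative.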

def extra_path_elements(d):
    path = ""
    elements = (d.getVar('EXTRANATIVEPATH') or "").split()
    for e in elements:
        path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
    return path

PATH_prepend = "${@extra_path_elements(d)}"

def get_lic_checksum_file_list(d):
    filelist = []
    lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
    tmpdir = d.getVar("TMPDIR")
    s = d.getVar("S")
    b = d.getVar("B")
    workdir = d.getVar("WORKDIR")

    urls = lic_files.split()
    for url in urls:
        # We only care about items that are absolute paths since
        # any others should be covered by SRC_URI.
        try:
            (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
            if method != "file" or not path:
                raise bb.fetch.MalformedUrl(url)

            if path[0] == '/':
                if path.startswith((tmpdir, s, b, workdir)):
                    continue
                filelist.append(path + ":" + str(os.path.exists(path)))
        except bb.fetch.MalformedUrl:
            bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
    return " ".join(filelist)
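
# A typical LIC_FILES_CHKSUM value this function parses (the checksum shown
# is the well-known md5 of the GPLv3 COPYING text, for illustration):
#
#   LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"
#
# Only absolute file:// paths outside TMPDIR/S/B/WORKDIR are added to the
# checksum list; anything else is already tracked through SRC_URI.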

def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
    tools = d.getVar(toolsvar).split()
    origbbenv = d.getVar("BB_ORIGENV", False)
    path = origbbenv.getVar("PATH")
    bb.utils.mkdirhier(dest)
    notfound = []
    for tool in tools:
        desttool = os.path.join(dest, tool)
        if not os.path.exists(desttool):
            # clean up dead symlink
            if os.path.islink(desttool):
                os.unlink(desttool)
            srctool = bb.utils.which(path, tool, executable=True)
            # gcc/g++ may link to ccache on some hosts, e.g.
            # /usr/local/bin/ccache/gcc -> /usr/bin/ccache; which(gcc) would
            # then return /usr/local/bin/ccache/gcc, but what we need is
            # /usr/bin/gcc, so detect and correct that here.
            if "ccache" in srctool:
                srctool = bb.utils.which(path, tool, executable=True, direction=1)
            if srctool:
                os.symlink(srctool, desttool)
            else:
                notfound.append(tool)

    if notfound and fatal:
        bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n  %s" % " ".join(notfound))

addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
python base_do_fetch() {
    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.download()
    except bb.fetch2.BBFetchException as e:
        bb.fatal("Bitbake Fetcher Error: " + repr(e))
}

addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"

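# When S lies outside WORKDIR, wipe S itself before unpacking; when S equals
# WORKDIR, only clear ${S}/patches so other task output in WORKDIR survives.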
do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"

python base_do_unpack() {
    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.unpack(d.getVar('WORKDIR'))
    except bb.fetch2.BBFetchException as e:
        bb.fatal("Bitbake Fetcher Error: " + repr(e))
}

def get_layers_branch_rev(d):
    layers = (d.getVar("BBLAYERS") or "").split()
    layers_branch_rev = ["%-20s = \"%s:%s\"" % (os.path.basename(i), \
        base_get_metadata_git_branch(i, None).strip(), \
        base_get_metadata_git_revision(i, None)) \
            for i in layers]
    i = len(layers_branch_rev)-1
    p1 = layers_branch_rev[i].find("=")
    s1 = layers_branch_rev[i][p1:]
    while i > 0:
        p2 = layers_branch_rev[i-1].find("=")
        s2 = layers_branch_rev[i-1][p2:]
        if s1 == s2:
            layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
            i -= 1
        else:
            i -= 1
            p1 = layers_branch_rev[i].find("=")
            s1 = layers_branch_rev[i][p1:]
    return layers_branch_rev
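
# Illustrative banner output (revision made up): consecutive layers sharing
# a branch:revision only have it printed on the last of the run; earlier
# entries are blanked by the loop above:
#
#   meta
#   meta-poky
#   meta-yocto-bsp       = "master:0123456789abcdef0123456789abcdef01234567"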


BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
BUILDCFG_FUNCS[type] = "list"

def buildcfg_vars(d):
    statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
    for var in statusvars:
        value = d.getVar(var)
        if value is not None:
            yield '%-20s = "%s"' % (var, value)

def buildcfg_neededvars(d):
    needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
    pesteruser = []
    for v in needed_vars:
        val = d.getVar(v)
        if not val or val == 'INVALID':
            pesteruser.append(v)

    if pesteruser:
        bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))

addhandler base_eventhandler
base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
python base_eventhandler() {
    import bb.runqueue

    if isinstance(e, bb.event.ConfigParsed):
        if not d.getVar("NATIVELSBSTRING", False):
            d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
        d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
        d.setVar('BB_VERSION', bb.__version__)

    # There might be no bb.event.ConfigParsed event if the bitbake server is
    # running, so check bb.event.BuildStarted too to make sure ${HOSTTOOLS_DIR}
    # exists.
    if isinstance(e, bb.event.ConfigParsed) or \
            (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
        # Works with the line in layer.conf which changes PATH to point here
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)

    if isinstance(e, bb.event.MultiConfigParsed):
        # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores'
        # own contexts so the variables get expanded correctly for that arch, then inject back into
        # the main data store.
        deps = []
        for config in e.mcdata:
            deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
        deps = " ".join(deps)
        e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)

    if isinstance(e, bb.event.BuildStarted):
        localdata = bb.data.createCopy(d)
        statuslines = []
        for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
            g = globals()
            if func not in g:
                bb.warn("Build configuration function '%s' does not exist" % func)
            else:
                flines = g[func](localdata)
                if flines:
                    statuslines.extend(flines)

        statusheader = d.getVar('BUILDCFG_HEADER')
        if statusheader:
            bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))

    # This code silences warnings where the SDK variables overwrite the
    # target ones and we'd see duplicate key names overwriting each other
    # for various PREFERRED_PROVIDERS
    if isinstance(e, bb.event.RecipePreFinalise):
        if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")

    if isinstance(e, bb.event.RecipeParsed):
        #
        # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set,
        # skip parsing for all the other providers, which will mean they get uninstalled from the
        # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
        # particular.
        #
        pn = d.getVar('PN')
        source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
        if not source_mirror_fetch:
            provs = (d.getVar("PROVIDES") or "").split()
            multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
            for p in provs:
                if p.startswith("virtual/") and p not in multiwhitelist:
                    profprov = d.getVar("PREFERRED_PROVIDER_" + p)
                    if profprov and pn != profprov:
                        raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
}

CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
CLEANBROKEN = "0"
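
# Recipes whose "make clean" is known to be broken can set CLEANBROKEN = "1"
# to skip the clean step in base_do_configure() below when the configure
# task hash changes.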

addtask configure after do_patch
do_configure[dirs] = "${B}"
base_do_configure() {
    if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
        if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
            cd ${B}
            if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
                oe_runmake clean
            fi
            # -ignore_readdir_race does not work correctly with -delete;
            # use xargs to avoid spurious build failures
            find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
        fi
    fi
    if [ -n "${CONFIGURESTAMPFILE}" ]; then
        mkdir -p `dirname ${CONFIGURESTAMPFILE}`
        echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
    fi
}

addtask compile after do_configure
do_compile[dirs] = "${B}"
base_do_compile() {
    if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
        oe_runmake || die "make failed"
    else
        bbnote "nothing to compile"
    fi
}

addtask install after do_compile
do_install[dirs] = "${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"

base_do_install() {
    :
}

base_do_package() {
    :
}

addtask build after do_populate_sysroot
do_build[noexec] = "1"
do_build[recrdeptask] += "do_deploy"
do_build () {
    :
}

def set_packagetriplet(d):
    archs = []
    tos = []
    tvs = []

    archs.append(d.getVar("PACKAGE_ARCHS").split())
    tos.append(d.getVar("TARGET_OS"))
    tvs.append(d.getVar("TARGET_VENDOR"))

    def settriplet(d, varname, archs, tos, tvs):
        triplets = []
        for i in range(len(archs)):
            for arch in archs[i]:
                triplets.append(arch + tvs[i] + "-" + tos[i])
        triplets.reverse()
        d.setVar(varname, " ".join(triplets))

    settriplet(d, "PKGTRIPLETS", archs, tos, tvs)

    variants = d.getVar("MULTILIB_VARIANTS") or ""
    for item in variants.split():
        localdata = bb.data.createCopy(d)
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
        localdata.setVar("OVERRIDES", overrides)

        archs.append(localdata.getVar("PACKAGE_ARCHS").split())
        tos.append(localdata.getVar("TARGET_OS"))
        tvs.append(localdata.getVar("TARGET_VENDOR"))

    settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)

python () {
    import string, re

    # Handle backfilling
    oe.utils.features_backfill("DISTRO_FEATURES", d)
    oe.utils.features_backfill("MACHINE_FEATURES", d)

    if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
        d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
    if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
        d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")

    # Handle PACKAGECONFIG
    #
    # These take the form:
    #
    #   PACKAGECONFIG ??= "<default options>"
    #   PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
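    #
    # A hypothetical recipe sketch showing how the fields below are consumed
    # (option names and the gnutls dependency are illustrative):
    #
    #   PACKAGECONFIG ??= "gnutls"
    #   PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls"
    #
    # With "gnutls" enabled, --with-gnutls lands in PACKAGECONFIG_CONFARGS
    # and gnutls in DEPENDS; when disabled, --without-gnutls is used instead.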
    pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
    if pkgconfigflags:
        pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
        pn = d.getVar("PN")

        mlprefix = d.getVar("MLPREFIX")

        def expandFilter(appends, extension, prefix):
            appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
            newappends = []
            for a in appends:
                if a.endswith("-native") or ("-cross-" in a):
                    newappends.append(a)
                elif a.startswith("virtual/"):
                    subs = a.split("/", 1)[1]
                    if subs.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append("virtual/" + prefix + subs + extension)
                else:
                    if a.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append(prefix + a + extension)
            return newappends

        def appendVar(varname, appends):
            if not appends:
                return
            if varname.find("DEPENDS") != -1:
                if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
                    appends = expandFilter(appends, "", "nativesdk-")
                elif bb.data.inherits_class('native', d):
                    appends = expandFilter(appends, "-native", "")
                elif mlprefix:
                    appends = expandFilter(appends, "", mlprefix)
            varname = d.expand(varname)
            d.appendVar(varname, " " + " ".join(appends))

        extradeps = []
        extrardeps = []
        extrarrecs = []
        extraconf = []
        for flag, flagval in sorted(pkgconfigflags.items()):
            items = flagval.split(",")
            num = len(items)
            if num > 6:
                bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
                        % (d.getVar('PN'), flag))

            if flag in pkgconfig:
                if num >= 3 and items[2]:
                    extradeps.append(items[2])
                if num >= 4 and items[3]:
                    extrardeps.append(items[3])
                if num >= 5 and items[4]:
                    extrarrecs.append(items[4])
                if num >= 1 and items[0]:
                    extraconf.append(items[0])
            elif num >= 2 and items[1]:
                extraconf.append(items[1])

            if num >= 6 and items[5]:
                conflicts = set(items[5].split())
                invalid = conflicts.difference(set(pkgconfigflags.keys()))
                if invalid:
                    bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
                            % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))

                if flag in pkgconfig:
                    intersec = conflicts.intersection(set(pkgconfig))
                    if intersec:
                        bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
                                % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))

        appendVar('DEPENDS', extradeps)
        appendVar('RDEPENDS_${PN}', extrardeps)
        appendVar('RRECOMMENDS_${PN}', extrarrecs)
        appendVar('PACKAGECONFIG_CONFARGS', extraconf)

    pn = d.getVar('PN')
    license = d.getVar('LICENSE')
    if license == "INVALID" and pn != "defaultpkgname":
        bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)

    if bb.data.inherits_class('license', d):
        check_license_format(d)
        unmatched_license_flags = check_license_flags(d)
        if unmatched_license_flags:
            if len(unmatched_license_flags) == 1:
                message = "because it has a restricted license '{0}', which is not whitelisted in LICENSE_FLAGS_WHITELIST".format(unmatched_license_flags[0])
            else:
                message = "because it has restricted licenses {0}, which are not whitelisted in LICENSE_FLAGS_WHITELIST".format(
                    ", ".join("'{0}'".format(f) for f in unmatched_license_flags))
            bb.debug(1, "Skipping %s %s" % (pn, message))
            raise bb.parse.SkipRecipe(message)

    # If we're building a target package we need to use fakeroot (pseudo)
    # in order to capture permissions, owners, groups and special files
    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
        d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_unpack', 'umask', '022')
        d.setVarFlag('do_configure', 'umask', '022')
        d.setVarFlag('do_compile', 'umask', '022')
        d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_install', 'fakeroot', '1')
        d.setVarFlag('do_install', 'umask', '022')
        d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_package', 'fakeroot', '1')
        d.setVarFlag('do_package', 'umask', '022')
        d.setVarFlag('do_package_setscene', 'fakeroot', '1')
        d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_devshell', 'fakeroot', '1')
        d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')

    need_machine = d.getVar('COMPATIBLE_MACHINE')
    if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
        import re
        compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
        for m in compat_machines:
            if re.match(need_machine, m):
                break
        else:
            raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))
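
    # COMPATIBLE_MACHINE is a regular expression matched against each entry
    # of MACHINEOVERRIDES; e.g. a BSP recipe might set (machine names are
    # illustrative):
    #
    #   COMPATIBLE_MACHINE = "^(qemux86|qemux86-64)$"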

    source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
    if not source_mirror_fetch:
        need_host = d.getVar('COMPATIBLE_HOST')
        if need_host:
            import re
            this_host = d.getVar('HOST_SYS')
            if not re.match(need_host, this_host):
                raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)

        bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()

        check_license = False if pn.startswith("nativesdk-") else True
        for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
                  "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
                  "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
            if pn.endswith(d.expand(t)):
                check_license = False
        if pn.startswith("gcc-source-"):
            check_license = False

        if check_license and bad_licenses:
            bad_licenses = expand_wildcard_licenses(d, bad_licenses)

            whitelist = []
            for lic in bad_licenses:
                spdx_license = return_spdx(d, lic)
                whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
                if spdx_license:
                    whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())

            if pn in whitelist:
                '''
                We need to track what we are whitelisting and why. If pn is
                incompatible, we need to be able to note that the image that
                is created may in fact contain incompatible licenses despite
                INCOMPATIBLE_LICENSE being set.
                '''
                bb.note("Including %s as buildable despite it having an incompatible license because it has been whitelisted" % pn)
            else:
                pkgs = d.getVar('PACKAGES').split()
                skipped_pkgs = {}
                unskipped_pkgs = []
                for pkg in pkgs:
                    incompatible_lic = incompatible_license(d, bad_licenses, pkg)
                    if incompatible_lic:
                        skipped_pkgs[pkg] = incompatible_lic
                    else:
                        unskipped_pkgs.append(pkg)
                if unskipped_pkgs:
                    for pkg in skipped_pkgs:
                        bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
                        mlprefix = d.getVar('MLPREFIX')
                        d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, ' '.join(skipped_pkgs[pkg]))
                    for pkg in unskipped_pkgs:
                        bb.debug(1, "Including the package %s" % pkg)
                else:
                    incompatible_lic = incompatible_license(d, bad_licenses)
                    for pkg in skipped_pkgs:
                        incompatible_lic += skipped_pkgs[pkg]
                    incompatible_lic = sorted(list(set(incompatible_lic)))

                    if incompatible_lic:
                        bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
                        raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))

    needsrcrev = False
    srcuri = d.getVar('SRC_URI')
    for uri in srcuri.split():
        (scheme, _, path) = bb.fetch.decodeurl(uri)[:3]

        # HTTP/FTP use the wget fetcher
        if scheme in ("http", "https", "ftp"):
            d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')

        # Svn packages should DEPEND on subversion-native
        if scheme == "svn":
            needsrcrev = True
            d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')

        # Git packages should DEPEND on git-native
        elif scheme in ("git", "gitsm"):
            needsrcrev = True
            d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')

        # Mercurial packages should DEPEND on mercurial-native
        elif scheme == "hg":
            needsrcrev = True
            d.appendVar("EXTRANATIVEPATH", ' python3-native ')
            d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')

        # Perforce packages support SRCREV = "${AUTOREV}"
        elif scheme == "p4":
            needsrcrev = True

        # OSC packages should DEPEND on osc-native
        elif scheme == "osc":
            d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')

        elif scheme == "npm":
            d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')

        # *.lz4 should DEPEND on lz4-native for unpacking
        if path.endswith('.lz4'):
            d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')

        # *.lz should DEPEND on lzip-native for unpacking
        elif path.endswith('.lz'):
            d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')

        # *.xz should DEPEND on xz-native for unpacking
        elif path.endswith('.xz') or path.endswith('.txz'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # .zip should DEPEND on unzip-native for unpacking
        elif path.endswith('.zip') or path.endswith('.jar'):
            d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')

        # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
        elif path.endswith('.rpm'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.deb should DEPEND on xz-native for unpacking
        elif path.endswith('.deb'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

    if needsrcrev:
        d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")

        # Gather all named SRCREVs to add to the sstate hash calculation
        # This anonymous python snippet is called multiple times so we
        # need to be careful to not double up the appends here and cause
        # the base hash to mismatch the task hash
        for uri in srcuri.split():
            parm = bb.fetch.decodeurl(uri)[5]
            uri_names = parm.get("name", "").split(",")
            for uri_name in filter(None, uri_names):
                srcrev_name = "SRCREV_{}".format(uri_name)
                if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
                    d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))
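
        # Illustrative pairing that the loop above tracks (URL and revision
        # made up):
        #
        #   SRC_URI = "git://example.com/foo.git;name=foo"
        #   SRCREV_foo = "0123456789abcdef0123456789abcdef01234567"
        #
        # Each SRCREV_<name> is appended to do_fetch's vardeps exactly once.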

    set_packagetriplet(d)

    # 'multimachine' handling
    mach_arch = d.getVar('MACHINE_ARCH')
    pkg_arch = d.getVar('PACKAGE_ARCH')

    if (pkg_arch == mach_arch):
        # Already machine specific - nothing further to do
        return

    #
    # We always try to scan SRC_URI for urls with machine overrides
    # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
    #
    override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
    if override != '0':
        paths = []
        fpaths = (d.getVar('FILESPATH') or '').split(':')
        machine = d.getVar('MACHINE')
        for p in fpaths:
            if os.path.basename(p) == machine and os.path.isdir(p):
                paths.append(p)

        if paths:
            for s in srcuri.split():
                if not s.startswith("file://"):
                    continue
                fetcher = bb.fetch2.Fetch([s], d)
                local = fetcher.localpath(s)
                for mp in paths:
                    if local.startswith(mp):
                        #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
                        d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
                        return

    packages = d.getVar('PACKAGES').split()
    for pkg in packages:
        pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)

        # We could look for != PACKAGE_ARCH here but how to choose
        # if multiple differences are present?
        # Look through PACKAGE_ARCHS for the priority order?
        if pkgarch and pkgarch == mach_arch:
            d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
            bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
}

addtask cleansstate after do_clean
python do_cleansstate() {
    sstate_clean_cachefiles(d)
}
addtask cleanall after do_cleansstate
do_cleansstate[nostamp] = "1"

python do_cleanall() {
    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.clean()
    except bb.fetch2.BBFetchException as e:
        bb.fatal(str(e))
}
do_cleanall[nostamp] = "1"


EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package