# Place, Suite 330, Boston, MA 02111-1307 USA.
import sys, os, getopt, glob, copy, os.path, re, time
-sys.path.append(os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
+sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
import bb
-from bb import utils, data, parse, debug, event, fatal
+from bb import utils, data, parse, debug, event, fatal, cache
from sets import Set
import itertools, optparse
parsespin = itertools.cycle( r'|/-\\' )
bbdebug = 0
-__version__ = "1.3.2"
+__version__ = "1.5.0"
#============================================================================#
# BBParsingStatus
"""
def __init__(self):
- self.cache_dirty = False
self.providers = {}
+ self.rproviders = {}
+ self.packages = {}
+ self.packages_dynamic = {}
self.bbfile_priority = {}
self.bbfile_config_priorities = []
- self.ignored_depedencies = None
+ self.ignored_dependencies = None
self.possible_world = []
self.world_target = Set()
self.pkg_pn = {}
self.pkg_dp = {}
self.pn_provides = {}
self.all_depends = Set()
+ self.build_all = {}
+ self.rundeps = {}
+ self.runrecs = {}
+ self.stamp = {}
- def handle_bb_data(self, file_name, bb_data, cached):
+ def handle_bb_data(self, file_name, bb_cache, cached):
"""
We will fill the dictionaries with the stuff we
need to build the dependency tree faster
"""
- if bb_data == None:
- return
-
- if not cached:
- self.cache_dirty = True
-
- pn = bb.data.getVar('PN', bb_data, True)
- pv = bb.data.getVar('PV', bb_data, True)
- pr = bb.data.getVar('PR', bb_data, True)
- dp = int(bb.data.getVar('DEFAULT_PREFERENCE', bb_data, True) or "0")
- provides = Set([pn] + (bb.data.getVar("PROVIDES", bb_data, 1) or "").split())
- depends = (bb.data.getVar("DEPENDS", bb_data, True) or "").split()
+ pn = bb_cache.getVar('PN', file_name, True)
+ pv = bb_cache.getVar('PV', file_name, True)
+ pr = bb_cache.getVar('PR', file_name, True)
+ dp = int(bb_cache.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
+ provides = Set([pn] + (bb_cache.getVar("PROVIDES", file_name, True) or "").split())
+ depends = (bb_cache.getVar("DEPENDS", file_name, True) or "").split()
+ packages = (bb_cache.getVar('PACKAGES', file_name, True) or "").split()
+ packages_dynamic = (bb_cache.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
+ rprovides = (bb_cache.getVar("RPROVIDES", file_name, True) or "").split()
# build PackageName to FileName lookup table
if pn not in self.pkg_pn:
self.pkg_pn[pn] = []
self.pkg_pn[pn].append(file_name)
+ self.build_all[file_name] = int(bb_cache.getVar('BUILD_ALL_DEPS', file_name, True) or "0")
+ self.stamp[file_name] = bb_cache.getVar('STAMP', file_name, True)
+
# build FileName to PackageName lookup table
self.pkg_fn[file_name] = pn
self.pkg_pvpr[file_name] = (pv,pr)
for dep in depends:
self.all_depends.add(dep)
+ # Build reverse hash for PACKAGES, so runtime dependencies
+ # can be be resolved (RDEPENDS, RRECOMMENDS etc.)
+ for package in packages:
+ if not package in self.packages:
+ self.packages[package] = []
+ self.packages[package].append(file_name)
+ rprovides += (bb_cache.getVar("RPROVIDES_%s" % package, file_name, 1) or "").split()
+
+ for package in packages_dynamic:
+ if not package in self.packages_dynamic:
+ self.packages_dynamic[package] = []
+ self.packages_dynamic[package].append(file_name)
+
+ for rprovide in rprovides:
+ if not rprovide in self.rproviders:
+ self.rproviders[rprovide] = []
+ self.rproviders[rprovide].append(file_name)
+
+ # Build hash of runtime depends and recommends
+
+ def add_dep(deplist, deps):
+ for dep in deps:
+ if not dep in deplist:
+ deplist[dep] = ""
+
+ if not file_name in self.rundeps:
+ self.rundeps[file_name] = {}
+ if not file_name in self.runrecs:
+ self.runrecs[file_name] = {}
+
+ for package in packages + [pn]:
+ if not package in self.rundeps[file_name]:
+ self.rundeps[file_name][package] = {}
+ if not package in self.runrecs[file_name]:
+ self.runrecs[file_name][package] = {}
+
+ add_dep(self.rundeps[file_name][package], bb.utils.explode_deps(bb_cache.getVar('RDEPENDS', file_name, True) or ""))
+ add_dep(self.runrecs[file_name][package], bb.utils.explode_deps(bb_cache.getVar('RRECOMMENDS', file_name, True) or ""))
+ add_dep(self.rundeps[file_name][package], bb.utils.explode_deps(bb_cache.getVar("RDEPENDS_%s" % package, file_name, True) or ""))
+ add_dep(self.runrecs[file_name][package], bb.utils.explode_deps(bb_cache.getVar("RRECOMMENDS_%s" % package, file_name, True) or ""))
+
# Collect files we may need for possible world-dep
# calculations
- if not bb.data.getVar('BROKEN', bb_data, True) and not bb.data.getVar('EXCLUDE_FROM_WORLD', bb_data, True):
+ if not bb_cache.getVar('BROKEN', file_name, True) and not bb_cache.getVar('EXCLUDE_FROM_WORLD', file_name, True):
self.possible_world.append(file_name)
def __init__( self, options ):
for key, val in options.__dict__.items():
setattr( self, key, val )
- self.data = data.init()
#============================================================================#
# BBCooker
def __init__( self ):
self.build_cache_fail = []
self.build_cache = []
+ self.rbuild_cache = []
self.building_list = []
self.build_path = []
self.consider_msgs_cache = []
self.stats = BBStatistics()
self.status = None
- self.pkgdata = None
self.cache = None
+ self.bb_cache = None
def tryBuildPackage( self, fn, item, the_data ):
"""Build one package"""
self.build_cache_fail.append(fn)
raise
- def tryBuild( self, fn, virtual ):
- """Build a provider and its dependencies"""
- if fn in self.building_list:
+ def tryBuild( self, fn, virtual , buildAllDeps , build_depends = []):
+ """
+ Build a provider and its dependencies.
+ build_depends is a list of previous build dependencies (not runtime)
+ If build_depends is empty, we're dealing with a runtime depends
+ """
+
+ the_data = self.bb_cache.loadDataFull(fn, self)
+
+ # Only follow all (runtime) dependencies if doing a build
+ if not buildAllDeps and self.configuration.cmd == "build":
+ buildAllDeps = self.status.build_all[fn]
+
+ # Error on build time dependency loops
+ if build_depends and build_depends.count(fn) > 1:
bb.error("%s depends on itself (eventually)" % fn)
bb.error("upwards chain is: %s" % (" -> ".join(self.build_path)))
return False
- the_data = self.pkgdata[fn]
+ # See if this is a runtime dependency we've already built
+ # Or a build dependency being handled in a different build chain
+ if fn in self.building_list:
+ return self.addRunDeps(fn, virtual , buildAllDeps)
+
item = self.status.pkg_fn[fn]
self.building_list.append(fn)
pathstr = "%s (%s)" % (item, virtual)
self.build_path.append(pathstr)
- depends_list = (bb.data.getVar('DEPENDS', the_data, 1) or "").split()
+ depends_list = (bb.data.getVar('DEPENDS', the_data, True) or "").split()
+
if self.configuration.verbose:
bb.note("current path: %s" % (" -> ".join(self.build_path)))
bb.note("dependencies for %s are: %s" % (item, " ".join(depends_list)))
continue
if not depcmd:
continue
- if self.buildProvider( dependency ) == 0:
+ if self.buildProvider( dependency , buildAllDeps , build_depends ) == 0:
bb.error("dependency %s (for %s) not satisfied" % (dependency,item))
failed = True
if self.configuration.abort:
self.stats.deps += 1
return False
+ if not self.addRunDeps(fn, virtual , buildAllDeps):
+ return False
+
if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, the_data):
self.build_cache.append(fn)
return True
preferred_file = None
- preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, self.configuration.data, 1)
+ localdata = data.createCopy(self.configuration.data)
+ bb.data.setVar('OVERRIDES', "%s:%s" % (pn, data.getVar('OVERRIDES', localdata)), localdata)
+ bb.data.update_data(localdata)
+
+ preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
if preferred_v:
m = re.match('(.*)_(.*)', preferred_v)
if m:
else:
bb.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s" % (preferred_file, pv_str, pn))
+ del localdata
+
# get highest priority file set
files = tmp_pn[0]
latest = None
print "%-30s %20s %20s" % (p, latest[0][0] + "-" + latest[0][1],
prefstr)
+
def showEnvironment( self ):
"""Show the outer or per-package environment"""
if self.configuration.buildfile:
+ self.cb = None
+ self.bb_cache = bb.cache.init(self)
try:
- self.configuration.data, fromCache = self.load_bbfile( self.configuration.buildfile )
+ self.configuration.data = self.bb_cache.loadDataFull(self.configuration.buildfile, self)
except IOError, e:
fatal("Unable to read %s: %s" % ( self.configuration.buildfile, e ))
except Exception, e:
if data.getVarFlag( e, 'python', self.configuration.data ):
sys.__stdout__.write("\npython %s () {\n%s}\n" % (e, data.getVar(e, self.configuration.data, 1)))
- def buildProvider( self, item ):
- fn = None
+ def generateDotGraph( self, pkgs_to_build, ignore_deps ):
+ """
+ Generate two graphs one for the DEPENDS and RDEPENDS. The current
+ implementation creates crappy graphs ;)
- discriminated = False
+ pkgs_to_build A list of packages that needs to be built
+ ignore_deps A list of names where processing of dependencies
+ should be stopped. e.g. dependencies that get
+ """
- if item not in self.status.providers:
- bb.error("Nothing provides %s" % item)
- return 0
+ def myFilterProvider( providers, item):
+ """
+ Take a list of providers and filter according to environment
+ variables. In contrast to filterProviders we do not discriminate
+ and take PREFERRED_PROVIDER into account.
+ """
+ eligible = []
+ preferred_versions = {}
+
+ # Collate providers by PN
+ pkg_pn = {}
+ for p in providers:
+ pn = self.status.pkg_fn[p]
+ if pn not in pkg_pn:
+ pkg_pn[pn] = []
+ pkg_pn[pn].append(p)
- all_p = self.status.providers[item]
+ bb.debug(1, "providers for %s are: %s" % (item, pkg_pn.keys()))
- for p in all_p:
- if p in self.build_cache:
- bb.debug(1, "already built %s in this run\n" % p)
- return 1
+ for pn in pkg_pn.keys():
+ preferred_versions[pn] = self.findBestProvider(pn, pkg_pn)[2:4]
+ eligible.append(preferred_versions[pn][1])
+ for p in eligible:
+ if p in self.build_cache_fail:
+ bb.debug(1, "rejecting already-failed %s" % p)
+ eligible.remove(p)
+
+ if len(eligible) == 0:
+ bb.error("no eligible providers for %s" % item)
+ return 0
+
+ prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, self.configuration.data, 1)
+
+ # try the preferred provider first
+ if prefervar:
+ for p in eligible:
+ if prefervar == self.status.pkg_fn[p]:
+ bb.note("Selecting PREFERRED_PROVIDER %s" % prefervar)
+ eligible.remove(p)
+ eligible = [p] + eligible
+
+ return eligible
+
+
+
+
+ # try to avoid adding the same rdepends over an over again
+ seen_depends = []
+ seen_rdepends = []
+
+
+ def add_depends(package_list):
+ """
+ Add all depends of all packages from this list
+ """
+ for package in package_list:
+ if package in seen_depends or package in ignore_deps:
+ continue
+
+ seen_depends.append( package )
+ if not package in self.status.providers:
+ """
+ We have not seen this name -> error in
+ dependency handling
+ """
+ bb.note( "ERROR with provider: %(package)s" % vars() )
+ print >> depends_file, '"%(package)s" -> ERROR' % vars()
+ continue
+
+ # get all providers for this package
+ providers = self.status.providers[package]
+
+ # now let us find the bestProvider for it
+ fn = self.filterProviders(providers, package)[0]
+
+ depends = bb.utils.explode_deps(self.bb_cache.getVar('DEPENDS', fn, True) or "")
+ version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True)
+ add_depends ( depends )
+
+ # now create the node
+ print >> depends_file, '"%(package)s" [label="%(package)s\\n%(version)s"]' % vars()
+
+ depends = filter( (lambda x: x not in ignore_deps), depends )
+ for depend in depends:
+ print >> depends_file, '"%(package)s" -> "%(depend)s"' % vars()
+
+
+ def add_all_depends( the_depends, the_rdepends ):
+ """
+ Add both DEPENDS and RDEPENDS. RDEPENDS will get dashed
+ lines
+ """
+ package_list = the_depends + the_rdepends
+ for package in package_list:
+ if package in seen_rdepends or package in ignore_deps:
+ continue
+
+ seen_rdepends.append( package )
+
+ # let us see if this is a runtime or
+ if package in the_depends:
+ if not package in self.status.providers:
+ bb.note( "ERROR with provider: %(package)s" % vars() )
+ print >> alldepends_file, '"%(package)s" -> ERROR' % vars()
+ continue
+
+ providers = self.status.providers[package]
+ elif package in the_rdepends:
+ if len(self.getProvidersRun(package)) == 0:
+ bb.note( "ERROR with rprovider: %(package)s" % vars() )
+ print >> alldepends_file, '"%(package)s" -> ERROR [style="dashed"]' % vars()
+ continue
+
+ providers = self.getProvidersRun(package)
+ else:
+ print "Complete ERROR! %s" % package
+ continue
+
+ # now let us find the bestProvider for it
+ fn = self.filterProviders(providers, package)[0]
+
+ depends = bb.utils.explode_deps(self.bb_cache.getVar('DEPENDS', fn, True) or "")
+ if fn in self.status.rundeps and package in self.status.rundeps[fn]:
+ rdepends= self.status.rundeps[fn][package].keys()
+ else:
+ rdepends = []
+ version = self.bb_cache.getVar('PV', fn, True ) + '-' + self.bb_cache.getVar('PR', fn, True)
+ if package == "task-opie-applets":
+ print fn
+ print depends
+ print depends
+ print version
+
+ add_all_depends ( depends, rdepends )
+
+ # now create the node
+ print >> alldepends_file, '"%(package)s" [label="%(package)s\\n%(version)s"]' % vars()
+
+ depends = filter( (lambda x: x not in ignore_deps), depends )
+ rdepends = filter( (lambda x: x not in ignore_deps), rdepends )
+ for depend in depends:
+ print >> alldepends_file, '"%(package)s" -> "%(depend)s"' % vars()
+ for depend in rdepends:
+ print >> alldepends_file, '"%(package)s" -> "%(depend)s" [style=dashed]' % vars()
+
+
+ # Add depends now
+ depends_file = file('depends.dot', 'w' )
+ print >> depends_file, "digraph depends {"
+ add_depends( pkgs_to_build )
+ print >> depends_file, "}"
+
+ # Add all depends now
+ alldepends_file = file('alldepends.dot', 'w' )
+ print >> alldepends_file, "digraph alldepends {"
+ add_all_depends( pkgs_to_build, [] )
+ print >> alldepends_file, "}"
+
+ def filterProviders(self, providers, item):
+ """
+ Take a list of providers and filter/reorder according to the
+ environment variables and previous build results
+ """
eligible = []
preferred_versions = {}
# Collate providers by PN
pkg_pn = {}
- for p in all_p:
+ for p in providers:
pn = self.status.pkg_fn[p]
if pn not in pkg_pn:
pkg_pn[pn] = []
# look to see if one of them is already staged, or marked as preferred.
# if so, bump it to the head of the queue
- for p in all_p:
- the_data = self.pkgdata[p]
- pn = bb.data.getVar('PN', the_data, 1)
- pv = bb.data.getVar('PV', the_data, 1)
- pr = bb.data.getVar('PR', the_data, 1)
- tmpdir = bb.data.getVar('TMPDIR', the_data, 1)
- stamp = '%s/stamps/%s-%s-%s.do_populate_staging' % (tmpdir, pn, pv, pr)
+ for p in providers:
+ pn = self.status.pkg_fn[p]
+ pv, pr = self.status.pkg_pvpr[p]
+
+ stamp = '%s.do_populate_staging' % self.status.stamp[p]
if os.path.exists(stamp):
(newvers, fn) = preferred_versions[pn]
if not fn in eligible:
oldver = "%s-%s" % (pv, pr)
newver = '-'.join(newvers)
if (newver != oldver):
- extra_chat = "; upgrading from %s to %s" % (oldver, newver)
+ extra_chat = "%s (%s) already staged but upgrading to %s to satisfy %s" % (pn, oldver, newver, item)
else:
- extra_chat = ""
+ extra_chat = "Selecting already-staged %s (%s) to satisfy %s" % (pn, oldver, item)
if self.configuration.verbose:
- bb.note("selecting already-staged %s to satisfy %s%s" % (pn, item, extra_chat))
+ bb.note("%s" % extra_chat)
eligible.remove(fn)
eligible = [fn] + eligible
discriminated = True
break
+ return eligible
+
+ def buildProvider( self, item , buildAllDeps , build_depends = [] ):
+ """
+ Build something to provide a named build requirement
+ (takes item names from DEPENDS namespace)
+ """
+
+ fn = None
+ discriminated = False
+
+ if not item in self.status.providers:
+ bb.error("Nothing provides dependency %s" % item)
+ bb.event.fire(bb.event.NoProvider(item,self.configuration.data))
+ return 0
+
+ all_p = self.status.providers[item]
+
+ for p in all_p:
+ if p in self.build_cache:
+ bb.debug(1, "already built %s in this run\n" % p)
+ return 1
+
+ eligible = self.filterProviders(all_p, item)
+
+ if not eligible:
+ return 0
+
prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, self.configuration.data, 1)
if prefervar:
self.preferred[item] = prefervar
providers_list.append(self.status.pkg_fn[fn])
bb.note("multiple providers are available (%s);" % ", ".join(providers_list))
bb.note("consider defining PREFERRED_PROVIDER_%s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data))
self.consider_msgs_cache.append(item)
# run through the list until we find one that we can build
for fn in eligible:
bb.debug(2, "selecting %s to satisfy %s" % (fn, item))
- if self.tryBuild(fn, item):
+ if self.tryBuild(fn, item, buildAllDeps, build_depends + [fn]):
return 1
bb.note("no buildable providers for %s" % item)
+ bb.event.fire(bb.event.NoProvider(item,self.configuration.data))
return 0
+ def buildRProvider( self, item , buildAllDeps ):
+ """
+ Build something to provide a named runtime requirement
+ (takes item names from RDEPENDS/PACKAGES namespace)
+ """
+
+ fn = None
+ all_p = []
+ discriminated = False
+
+ if not buildAllDeps:
+ return True
+
+ all_p = self.getProvidersRun(item)
+
+ if not all_p:
+ bb.error("Nothing provides runtime dependency %s" % (item))
+ bb.event.fire(bb.event.NoProvider(item,self.configuration.data,runtime=True))
+ return False
+
+ for p in all_p:
+ if p in self.rbuild_cache:
+ bb.debug(2, "Already built %s providing runtime %s\n" % (p,item))
+ return True
+ if p in self.build_cache:
+ bb.debug(2, "Already built %s but adding any further RDEPENDS for %s\n" % (p, item))
+ return self.addRunDeps(p, item , buildAllDeps)
+
+ eligible = self.filterProviders(all_p, item)
+ if not eligible:
+ return 0
+
+ preferred = []
+ for p in eligible:
+ pn = self.status.pkg_fn[p]
+ provides = self.status.pn_provides[pn]
+ for provide in provides:
+ prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, self.configuration.data, 1)
+ if prefervar == pn:
+ if self.configuration.verbose:
+ bb.note("selecting %s to satisfy runtime %s due to PREFERRED_PROVIDERS" % (pn, item))
+ eligible.remove(p)
+ eligible = [p] + eligible
+ preferred.append(p)
+
+ if len(eligible) > 1 and len(preferred) == 0:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in eligible:
+ providers_list.append(self.status.pkg_fn[fn])
+ bb.note("multiple providers are available (%s);" % ", ".join(providers_list))
+ bb.note("consider defining a PREFERRED_PROVIDER to match runtime %s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data,runtime=True))
+ self.consider_msgs_cache.append(item)
+
+ if len(preferred) > 1:
+ if item not in self.consider_msgs_cache:
+ providers_list = []
+ for fn in preferred:
+ providers_list.append(self.status.pkg_fn[fn])
+ bb.note("multiple preferred providers are available (%s);" % ", ".join(providers_list))
+ bb.note("consider defining only one PREFERRED_PROVIDER to match runtime %s" % item)
+ bb.event.fire(bb.event.MultipleProviders(item,providers_list,self.configuration.data,runtime=True))
+ self.consider_msgs_cache.append(item)
+
+ # run through the list until we find one that we can build
+ for fn in eligible:
+ bb.debug(2, "selecting %s to satisfy runtime %s" % (fn, item))
+ if self.tryBuild(fn, item, buildAllDeps):
+ return True
+
+ bb.error("No buildable providers for runtime %s" % item)
+ bb.event.fire(bb.event.NoProvider(item,self.configuration.data))
+ return False
+
+ def getProvidersRun(self, rdepend):
+ """
+ Return any potential providers of runtime rdepend
+ """
+ rproviders = []
+
+ if rdepend in self.status.rproviders:
+ rproviders += self.status.rproviders[rdepend]
+
+ if rdepend in self.status.packages:
+ rproviders += self.status.packages[rdepend]
+
+ if rproviders:
+ return rproviders
+
+ # Only search dynamic packages if we can't find anything in other variables
+ for pattern in self.status.packages_dynamic:
+ regexp = re.compile(pattern)
+ if regexp.match(rdepend):
+ rproviders += self.status.packages_dynamic[pattern]
+
+ return rproviders
+
+ def addRunDeps(self , fn, item , buildAllDeps):
+ """
+ Add any runtime dependencies of runtime item provided by fn
+ as long as item hasn't previously been processed by this function.
+ """
+
+ if item in self.rbuild_cache:
+ return True
+
+ if not buildAllDeps:
+ return True
+
+ rdepends = []
+ self.rbuild_cache.append(item)
+
+ if fn in self.status.rundeps and item in self.status.rundeps[fn]:
+ rdepends += self.status.rundeps[fn][item].keys()
+ if fn in self.status.runrecs and item in self.status.runrecs[fn]:
+ rdepends += self.status.runrecs[fn][item].keys()
+
+ bb.debug(2, "Additional runtime dependencies for %s are: %s" % (item, " ".join(rdepends)))
+
+ for rdepend in rdepends:
+ if rdepend in self.status.ignored_dependencies:
+ continue
+ if not self.buildRProvider(rdepend, buildAllDeps):
+ return False
+ return True
+
def buildDepgraph( self ):
all_depends = self.status.all_depends
pn_provides = self.status.pn_provides
+ localdata = data.createCopy(self.configuration.data)
+ bb.data.update_data(localdata)
+
def calc_bbfile_priority(filename):
for (regex, pri) in self.status.bbfile_config_priorities:
if regex.match(filename):
return 0
# Handle PREFERRED_PROVIDERS
- for p in (bb.data.getVar('PREFERRED_PROVIDERS', self.configuration.data, 1) or "").split():
+ for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split():
(providee, provider) = p.split(':')
if providee in self.preferred and self.preferred[providee] != provider:
bb.error("conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.preferred[providee]))
self.preferred[providee] = provider
# Calculate priorities for each file
- for p in self.pkgdata.keys():
+ for p in self.status.pkg_fn.keys():
self.status.bbfile_priority[p] = calc_bbfile_priority(p)
- # Build package list for "bitbake world"
+ def buildWorldTargetList(self):
+ """
+ Build package list for "bitbake world"
+ """
+ all_depends = self.status.all_depends
+ pn_provides = self.status.pn_provides
bb.debug(1, "collating packages for \"world\"")
for f in self.status.possible_world:
terminal = True
self.status.possible_world = None
self.status.all_depends = None
- def myProgressCallback( self, x, y, f, file_data, from_cache ):
+ def myProgressCallback( self, x, y, f, bb_cache, from_cache ):
# feed the status with new input
- self.status.handle_bb_data(f, file_data, from_cache)
+
+ self.status.handle_bb_data(f, bb_cache, from_cache)
if bbdebug > 0:
return
def parseConfigurationFile( self, afile ):
try:
self.configuration.data = bb.parse.handle( afile, self.configuration.data )
+
+ # Add the handlers we inherited by INHERIT
+ # we need to do this manually as it is not guaranteed
+ # we will pick up these classes... as we only INHERIT
+ # on .inc and .bb files but not on .conf
+ data = bb.data.createCopy( self.configuration.data )
+ inherits = ["base"] + (bb.data.getVar('INHERIT', data, True ) or "").split()
+ for inherit in inherits:
+ data = bb.parse.handle( os.path.join('classes', '%s.bbclass' % inherit ), data, True )
+
+ # FIXME: This assumes that we included at least one .inc file
+ for var in bb.data.keys(data):
+ if bb.data.getVarFlag(var, 'handler', data):
+ bb.event.register(var,bb.data.getVar(var, data))
+
except IOError:
bb.fatal( "Unable to open %s" % afile )
except bb.parse.ParseError, details:
def cook( self, configuration, args ):
+ """
+ We are building stuff here. We do the building
+ from here. By default we try to execute task
+ build.
+ """
+
self.configuration = configuration
if not self.configuration.cmd:
self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )
+
+ #
+ # Special updated configuration we use for firing events
+ #
+ self.configuration.event_data = bb.data.createCopy(self.configuration.data)
+ bb.data.update_data(self.configuration.event_data)
+
if self.configuration.show_environment:
self.showEnvironment()
sys.exit( 0 )
print "Requested parsing .bb files only. Exiting."
return
- bb.data.update_data( self.configuration.data )
+
self.buildDepgraph()
if self.configuration.show_versions:
self.showVersions()
sys.exit( 0 )
if 'world' in pkgs_to_build:
+ self.buildWorldTargetList()
pkgs_to_build.remove('world')
for t in self.status.world_target:
pkgs_to_build.append(t)
- bb.event.fire(bb.event.BuildStarted(buildname, pkgs_to_build, self.configuration.data))
+ if self.configuration.dot_graph:
+ self.generateDotGraph( pkgs_to_build, self.configuration.ignored_dot_deps )
+ sys.exit( 0 )
+
+
+ bb.event.fire(bb.event.BuildStarted(buildname, pkgs_to_build, self.configuration.event_data))
+ failures = 0
for k in pkgs_to_build:
failed = False
try:
- if self.buildProvider( k ) == 0:
+ if self.buildProvider( k , False ) == 0:
# already diagnosed
failed = True
except bb.build.EventException:
failed = True
if failed:
+ failures += 1
if self.configuration.abort:
sys.exit(1)
- bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.data))
+ bb.event.fire(bb.event.BuildCompleted(buildname, pkgs_to_build, self.configuration.event_data, failures))
sys.exit( self.stats.show() )
return []
return finddata.readlines()
- def deps_clean(self, d):
- depstr = data.getVar('__depends', d)
- if depstr:
- deps = depstr.split(" ")
- for dep in deps:
- (f,old_mtime_s) = dep.split("@")
- old_mtime = int(old_mtime_s)
- new_mtime = parse.cached_mtime(f)
- if (new_mtime > old_mtime):
- return False
- return True
-
- def load_bbfile( self, bbfile ):
- """Load and parse one .bb build file"""
-
- if not self.cache in [None, '']:
- # get the times
- cache_mtime = data.init_db_mtime(self.cache, bbfile)
- file_mtime = parse.cached_mtime(bbfile)
-
- if file_mtime > cache_mtime:
- #print " : '%s' dirty. reparsing..." % bbfile
- pass
- else:
- #print " : '%s' clean. loading from cache..." % bbfile
- cache_data = data.init_db( self.cache, bbfile, False )
- if self.deps_clean(cache_data):
- return cache_data, True
-
- topdir = data.getVar('TOPDIR', self.configuration.data)
- if not topdir:
- topdir = os.path.abspath(os.getcwd())
- # set topdir to here
- data.setVar('TOPDIR', topdir, self.configuration)
- bbfile = os.path.abspath(bbfile)
- bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
- # expand tmpdir to include this topdir
- data.setVar('TMPDIR', data.getVar('TMPDIR', self.configuration.data, 1) or "", self.configuration.data)
- # set topdir to location of .bb file
- topdir = bbfile_loc
- #data.setVar('TOPDIR', topdir, cfg)
- # go there
- oldpath = os.path.abspath(os.getcwd())
- os.chdir(topdir)
- bb = data.init_db(self.cache,bbfile, True, self.configuration.data)
- try:
- parse.handle(bbfile, bb) # read .bb data
- if not self.cache in [None, '']:
- bb.commit(parse.cached_mtime(bbfile)) # write cache
- os.chdir(oldpath)
- return bb, False
- finally:
- os.chdir(oldpath)
-
def collect_bbfiles( self, progressCallback ):
"""Collect all available .bb build files"""
self.cb = progressCallback
parsed, cached, skipped, masked = 0, 0, 0, 0
- self.cache = bb.data.getVar( "CACHE", self.configuration.data, 1 )
- self.pkgdata = data.pkgdata( not self.cache in [None, ''], self.cache, self.configuration.data )
+ self.bb_cache = bb.cache.init(self)
- if not self.cache in [None, '']:
- if self.cb is not None:
- print "NOTE: Using cache in '%s'" % self.cache
- try:
- os.stat( self.cache )
- except OSError:
- bb.mkdirhier( self.cache )
- else:
- if self.cb is not None:
- print "NOTE: Not using a cache. Set CACHE = <directory> to enable."
files = (data.getVar( "BBFILES", self.configuration.data, 1 ) or "").split()
data.setVar("BBFILES", " ".join(files), self.configuration.data)
# read a file's metadata
try:
- bb_data, fromCache = self.load_bbfile(f)
- if fromCache: cached += 1
+ fromCache, skip = self.bb_cache.loadData(f, self)
+ if skip:
+ skipped += 1
+ #bb.note("Skipping %s" % f)
+ self.bb_cache.skip(f)
+ continue
+ elif fromCache: cached += 1
else: parsed += 1
deps = None
- if bb_data is not None:
- # allow metadata files to add items to BBFILES
- #data.update_data(self.pkgdata[f])
- addbbfiles = data.getVar('BBFILES', bb_data) or None
- if addbbfiles:
- for aof in addbbfiles.split():
- if not files.count(aof):
- if not os.path.isabs(aof):
- aof = os.path.join(os.path.dirname(f),aof)
- files.append(aof)
- for var in bb_data.keys():
- if data.getVarFlag(var, "handler", bb_data) and data.getVar(var, bb_data):
- event.register(data.getVar(var, bb_data))
- self.pkgdata[f] = bb_data
+
+ # allow metadata files to add items to BBFILES
+ #data.update_data(self.pkgdata[f])
+ addbbfiles = self.bb_cache.getVar('BBFILES', f, False) or None
+ if addbbfiles:
+ for aof in addbbfiles.split():
+ if not files.count(aof):
+ if not os.path.isabs(aof):
+ aof = os.path.join(os.path.dirname(f),aof)
+ files.append(aof)
# now inform the caller
if self.cb is not None:
- self.cb( i + 1, len( newfiles ), f, bb_data, fromCache )
+ self.cb( i + 1, len( newfiles ), f, self.bb_cache, fromCache )
except IOError, e:
+ self.bb_cache.remove(f)
bb.error("opening %s: %s" % (f, e))
pass
- except bb.parse.SkipPackage:
- skipped += 1
- pass
except KeyboardInterrupt:
+ self.bb_cache.sync()
raise
except Exception, e:
+ self.bb_cache.remove(f)
bb.error("%s while parsing %s" % (e, f))
+ except:
+ self.bb_cache.remove(f)
+ raise
if self.cb is not None:
print "\rNOTE: Parsing finished. %d cached, %d parsed, %d skipped, %d masked." % ( cached, parsed, skipped, masked ),
+ self.bb_cache.sync()
+
#============================================================================#
# main
#============================================================================#
-if __name__ == "__main__":
-
+def main():
parser = optparse.OptionParser( version = "BitBake Build Tool Core version %s, %%prog version %s" % ( bb.__version__, __version__ ),
usage = """%prog [options] [package ...]
parser.add_option( "-f", "--force", help = "force run of specified cmd, regardless of stamp status",
action = "store_true", dest = "force", default = False )
- parser.add_option( "-i", "--interactive", help = "drop into the interactive mode.",
+ parser.add_option( "-i", "--interactive", help = "drop into the interactive mode also called the BitBake shell.",
action = "store_true", dest = "interactive", default = False )
- parser.add_option( "-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing)",
+ parser.add_option( "-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing). Depending on the base.bbclass a listtaks tasks is defined and will show available tasks",
action = "store", dest = "cmd", default = "build" )
parser.add_option( "-r", "--read", help = "read the specified file before bitbake.conf",
parser.add_option( "-v", "--verbose", help = "output more chit-chat to the terminal",
action = "store_true", dest = "verbose", default = False )
- parser.add_option( "-D", "--debug", help = "Increase the debug level",
+ parser.add_option( "-D", "--debug", help = "Increase the debug level. You can specify this more than once.",
action = "count", dest="debug", default = 0)
parser.add_option( "-n", "--dry-run", help = "don't execute, just go through the motions",
parser.add_option( "-e", "--environment", help = "show the global or per-package environment (this is what used to be bbread)",
action = "store_true", dest = "show_environment", default = False )
+ parser.add_option( "-g", "--graphviz", help = "emit the dependency trees of the specified packages in the dot syntax",
+ action = "store_true", dest = "dot_graph", default = False )
+ parser.add_option( "-I", "--ignore-deps", help = """Stop processing at the given list of dependencies when generating dependency graphs. This can help to make the graph more appealing""",
+ action = "append", dest = "ignored_dot_deps", default = [] )
+
+
options, args = parser.parse_args( sys.argv )
cooker = BBCooker()
cooker.cook( BBConfiguration( options ), args[1:] )
+
+
+
+if __name__ == "__main__":
+ print """WARNING, WARNING, WARNING
+This is a Bitbake from the Unstable/Development Branch.
+You might want to use the bitbake-1.4 stable branch (if you are not a BitBake developer or tester). I'm going to sleep 5 seconds now to make sure you see that."""
+ import time
+ time.sleep(5)
+ main()