diff --git a/.gitignore b/.gitignore
index 2bc03a8d..46270061 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,3 @@
 *.pyo
+*.pyc
+*.db
diff --git a/scripts/buildbot.sh b/scripts/buildbot.sh
new file mode 100755
index 00000000..05d1b945
--- /dev/null
+++ b/scripts/buildbot.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+dobuild() {
+	local build=$2
+	local subarch=$3
+	# buildrepo passes "True" for this argument if the last build also built a stage1 (i.e. was not a freshen), otherwise "False"
+	local full=$4
+	local buildtype=$1
+	if [ "$full" = "True" ]; then
+		buildtype="freshen"
+	fi
+	if [ "$build" = "funtoo-current" ]; then
+		if [ "$subarch" = "corei7" ]; then
+			buildtype="$buildtype+openvz"
+		fi
+		if [ "$subarch" = "core2_64" ]; then
+			buildtype="$buildtype+openvz"
+		fi
+		if [ "$subarch" = "generic_64" ]; then
+			buildtype="$buildtype+openvz"
+		fi
+	fi
+	if [ "$build" != "" ] && [ "$subarch" != "" ] && [ "$buildtype" != "" ]; then
+		echo "Building $build $subarch $buildtype"
+		exec /root/git/metro/scripts/ezbuild.sh $build $subarch $buildtype
+	else
+		echo "Couldn't determine build, subarch and build type. Exiting."
+		exit 1
+	fi
+}
+( cd /root/git/metro; git pull )
+if [ "$1" = "" ]; then
+	echo "Please specify type of build (full, full+openvz, etc.)"
+	exit 1
+fi
+export METRO_BUILDS="funtoo-current funtoo-stable funtoo-experimental"
+export STALE_DAYS=3
+cd /var/tmp
+a=$(/root/git/metro/scripts/buildrepo nextbuild); ret=$? # keep buildrepo's exit status; $? is clobbered by the tests below
+if [ $ret -eq 1 ]; then
+	# we are current
+	exit 0
+elif [ $ret -eq 2 ]; then
+	# error
+	echo "buildrepo error: (re-doing to get full output):"
+	/root/git/metro/scripts/buildrepo nextbuild
+	exit 1
+fi
+# otherwise, build what needs to be built:
+dobuild full $a
diff --git a/scripts/buildrepo b/scripts/buildrepo
new file mode 100755
index 00000000..e67732e5
--- /dev/null
+++ b/scripts/buildrepo
@@ -0,0 +1,209 @@
+#!/usr/bin/python2
+
+import os
+import sys
+import glob
+import datetime
+from db import *
+
+builds = (
+	"funtoo-experimental",
+	"funtoo-current",
+	"funtoo-stable",
+	"gentoo-stable",
+)
+
+arches = (
+	"x86-32bit",
+	"x86-64bit",
+	"sparc-64bit"
+)
+
+subarches = (
+	"atom_32",
+	"atom_64",
+	"corei7",
+	"i486",
+	"i686",
+	"athlon-xp",
+	"pentium4",
+	"core2_32",
+	"amd64-k8_32",
+	"amd64-k8",
+	"amd64-k10",
+	"core2_64",
+	"generic_64",
+	"ultrasparc",
+	"ultrasparc3",
+	"niagara",
+	"niagara2",
+	"generic_sparcv9"
+)
+
+class SubArch(dbobject):
+	@classmethod
+	def _makeTable(cls,db):
+		cls.db = db
+		cls.__table__ = Table('subarch', db.metadata,
+			Column('id', Integer, primary_key = True),
+			Column('date', DateTime, index=True),
+			Column('date_str', String, index=True),
+			Column('path', String, index=True),
+			Column('build', String, index=True),
+			Column('arch', String, index=True),
+			Column('subarch', String, index=True),
+		)
+
+class BuildDir(dbobject):
+	@classmethod
+	def _makeTable(cls,db):
+		cls.db = db
+		cls.__table__ = Table('bdir', db.metadata,
+			Column('id', Integer, primary_key = True),
+			Column('date', DateTime, index=True),
+			Column('path', String, index=True),
+			Column('build', String, index=True),
+			Column('arch', String, index=True),
+			Column('subarch', String, index=True),
+			Column('date_str', String, index=True),
+			Column('complete', Boolean, index=True),
+			Column('full', Boolean, index=True)
+		)
+
+class Snapshot(dbobject):
+	@classmethod
+	def _makeTable(cls,db):
+		cls.db = db
+		cls.__table__ = Table('snapshot', db.metadata,
+			Column('id', Integer, primary_key = True),
+			Column('path', String, index=True),
+			Column('build', String, index=True),
+		)
+
+class RepositoryDatabase(Database):
+	__database__ = "sqlite:///cleaner.db"
+	def __init__(self):
+		Database.__init__(self,[BuildDir, Snapshot, SubArch])
+		self.associate()
+	def associate(self):
+		Database.associate(self,self.__database__)
+
+initial_path = "/home/mirror/linux"
+
+if __name__ == "__main__":
+	if os.path.exists("cleaner.db"):
+		os.unlink("cleaner.db")
+	db = RepositoryDatabase()
+	session = db.session
+	for build in builds:
+		if not os.path.exists("%s/%s" % (initial_path, build)):
+			continue
+		snapdir = "%s/%s/snapshots" % ( initial_path, build )
+		if os.path.isdir(snapdir) and not os.path.islink(snapdir):
+			for match in glob.glob("%s/portage-*.tar.xz" % snapdir):
+				basename = os.path.basename(match)
+				if basename == "portage-current.tar.xz":
+					continue
+				sna = Snapshot()
+				sna.path = match
+				sna.build = build
+				session.add(sna)
+		for arch in arches:
+			if not os.path.exists("%s/%s/%s" % ( initial_path, build, arch )):
+				continue
+			for subarch in subarches:
+				path = "%s/%s/%s/%s" % (initial_path, build, arch, subarch)
+				if not os.path.exists(path):
+					continue
+				for instance in os.listdir(path):
+					ipath = "%s/%s" % ( path, instance )
+					if not os.path.isdir(ipath):
+						continue
+					if instance == ".control":
+						ctlfile = ipath + "/version/stage3"
+						if os.path.exists(ctlfile):
+							a = open(ctlfile,"r")
+							mydate = a.readlines()[0].strip()
+							date = datetime.datetime.strptime(mydate,"%Y-%m-%d")
+							mtime_path = path + "/" + mydate
+							for match in glob.glob("%s/stage3*.tar.*" % mtime_path):
+								mtime_date = datetime.datetime.fromtimestamp(os.path.getmtime(match))
+								sa = SubArch()
+								sa.path = ipath
+								sa.date = mtime_date
+								sa.date_str = mydate
+								sa.build = build
+								sa.arch = arch
+								sa.subarch = subarch
+								session.add(sa)
+							a.close()
+					else:
+						bdir = BuildDir()
+						bdir.path = ipath
+						bdir.date_str = instance
+						bdir.build = build
+						bdir.arch = arch
+						bdir.subarch = subarch
+						bdir.complete = False
+						bdir.full = False
+						for match in glob.glob("%s/stage3*.tar.*" % ipath):
+							bdir.complete = True
+							break
+						if bdir.complete:
+							for match in glob.glob("%s/stage1*.tar.*" % ipath):
+								bdir.full = True
+								break
+						session.add(bdir)
+	session.commit()
+
+if len(sys.argv) > 1 and sys.argv[1] == "clean":
+	for build in builds:
+		for arch in arches:
+			for subarch in subarches:
+				out = session.query(BuildDir).filter_by(build=build).filter_by(arch=arch).filter_by(subarch=subarch).filter_by(complete=True).order_by(BuildDir.date_str).all()
+				for x in out[0:-3]:
+					print("rm -rf %s" % x.path)
+				for x in out[-3:]:
+					print("# keeping %s" % x.path)
+		sna = session.query(Snapshot).filter_by(build=build).order_by(Snapshot.path).all()
+		for x in sna[0:-2]:
+			print("rm %s" % x.path)
+		for x in sna[-2:]:
+			print("# keeping %s" % x.path)
+	for incomplete in session.query(BuildDir).filter_by(complete=False):
+		print("rm -rf %s # not complete" % incomplete.path)
+elif len(sys.argv) > 1 and sys.argv[1] == "nextbuild":
+	if "METRO_BUILDS" not in os.environ:
+		print("Please set METRO_BUILDS env var to a space-separated list of builds, in order of preference.")
+		sys.exit(2)
+	env_builds = os.environ["METRO_BUILDS"].split()
+	stale_days = 4
+	if "STALE_DAYS" in os.environ:
+		try:
+			stale_days = int(os.environ["STALE_DAYS"])
+			print("Using stale_days of %s" % stale_days)
+		except:
+			print("STALE_DAYS env var is not an integer. Please fix.")
+			sys.exit(2)
+	now = datetime.datetime.now()
+	sa = session.query(SubArch)
+	for build in env_builds:
+		if build not in builds:
+			print("# Build %s not recognized; exiting." % build)
+			sys.exit(2)
+		# grab all subarches for this build...
+		sa2 = sa.filter_by(build=build)
+		# order subarches by date, oldest to newest, and iterate over them:
+		for x in sa2.order_by(SubArch.__table__.c.date):
+			# if something is newer than stale_days days old, it is not considered stale, so we skip over it:
+			if now - x.date < datetime.timedelta(days=stale_days):
+				continue
+			# otherwise, we have found the next thing we should try to build. Output important info to stdout:
+			else:
+				# output: build subarch was-last-build-full(had-a-stage-1)(boolean) date
+				print x.build, x.subarch,
+				b = session.query(BuildDir).filter_by(build=x.build).filter_by(subarch=x.subarch).filter_by(date_str=x.date_str).one()
+				print b.full, x.date
+				exit(0)
+	# if we are totally current, exit with non-zero return value
+	exit(1)
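The contract between buildrepo and buildbot.sh is: "buildrepo nextbuild" exits 0 and prints "build subarch was-last-build-full date" when it finds a stale subarch, exits 1 when everything is current, and exits 2 on a configuration or repository error. As a rough sketch only (the buildrepo path, working directory and environment values below are illustrative assumptions, not part of this change), the same contract could be driven from Python like this:

#!/usr/bin/python2
# Illustrative consumer of "buildrepo nextbuild"; paths and env values are assumptions.
import os
import subprocess

os.environ["METRO_BUILDS"] = "funtoo-current funtoo-stable"  # builds, in order of preference
os.environ["STALE_DAYS"] = "3"                               # builds older than this are considered stale

p = subprocess.Popen(["/root/git/metro/scripts/buildrepo", "nextbuild"],
	stdout=subprocess.PIPE, cwd="/var/tmp")
out, _ = p.communicate()

if p.returncode == 0:
	# stdout looks like: "<build> <subarch> <last-build-was-full> <date>"
	build, subarch, was_full = out.split()[0:3]
	# mirror dobuild(): if the last build was full (had a stage1), freshen it this time
	buildtype = "freshen" if was_full == "True" else "full"
	print "next build:", build, subarch, buildtype
elif p.returncode == 1:
	print "everything is current"
else:
	print "buildrepo reported an error"

buildbot.sh does exactly this in shell, passing the output words straight to dobuild as positional parameters.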
diff --git a/scripts/db.py b/scripts/db.py
new file mode 100755
index 00000000..ca9839f2
--- /dev/null
+++ b/scripts/db.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python2
+
+'''
+
+This module is a fourth attempt at some clean design patterns for encapsulating
+SQLAlchemy database objects, so that they can more easily be embedded in other
+objects. This code takes advantage of the SQLAlchemy ORM but purposely DOES NOT
+USE SQLAlchemy's declarative syntax. This is intentional because I've come to
+the conclusion (after using declarative for a long time) that it's a pain in
+the butt, not well documented, and hides a lot of the power of SQLAlchemy, so
+it's a liability to use it.
+
+Instead of using a declarative_base, database objects are simply derived from
+object, and contain a _makeTable() method, which creates the Table object, and a
+_mapTable() method, which maps this new table to the class. Both are called by the
+Database object when it is initialized:
+
+orm = Database([User])
+
+Above, we create a new Database object (to hold metadata, engine and session
+information), and we pass it a list or tuple of all objects to include as part
+of our Database. When Database's __init__() method is called, it will ensure
+that the User class' _makeTable() and _mapTable() methods are called, so that the
+User table is associated with our Database and created in the underlying
+metadata.
+
+This design pattern allows for the creation of a library of different kinds of
+database-aware objects, such as our User object above. Then, other
+code can import this module and create a database schema with one or more of
+these objects very easily:
+
+orm = Database([Class1, Class2, Class3])
+
+Classes that should be part of the Database can be included, and those that we
+don't want can be omitted.
+
+We could also create two or more schemas:
+
+user_db = Database([User])
+user_db.associate(engine="sqlite:///users.db")
+
+product_db = Database([Product, ProductID, ProductCategory])
+product_db.associate(engine="sqlite:///products.db")
+
+tmp_db = Database([TmpObj, TmpObj2])
+tmp_db.associate(engine="sqlite:///:memory:")
+
+Or two different types of User objects:
+
+class OtherUser(User):
+	pass
+
+user_db = Database([User])
+other_user_db = Database([OtherUser])
+
+Since all the session, engine and metadata stuff is encapsulated inside the
+Database instances, this makes it a lot easier to use multiple database engines
+from the same source code. At least, it provides a framework to make this a lot
+less confusing:
+
+for u in user_db.session.query(User).all():
+	print u
+
+'''
+import logging
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from sqlalchemy.ext.orderinglist import ordering_list
+
+logging.basicConfig(level=logging.DEBUG)
+
+class DatabaseError(Exception):
+	def __init__(self, value):
+		self.value = value
+	def __str__(self):
+		return self.value
+
+'''
+dbobject is a handy object to use as a base class for your database-aware objects. However,
+using it is optional. It is perfectly OK to subclass a standard python new-style object.
+'''
+
+class dbobject(object):
+	def __init__(self,id=None):
+		self.id = id
+	def __repr__(self):
+		return "%s(%s)" % (self.__class__.__name__, self.id)
+	@classmethod
+	def _mapTable(cls,db):
+		mapper(cls, cls.__table__, primary_key=[cls.__table__.c.id])
+
+class Database(object):
+
+	def __init__(self,objs=[],engine=None):
+		self._dbobj = objs
+		self._tables = {}
+		self.engine = None
+		self._session = None
+		self._autodict = {}
+		self.metadata = MetaData()
+		self.sessionmaker = None
+		if engine != None:
+			self.associate(engine)
+
+	def autoName(self,name):
+		if name not in self._autodict:
+			self._autodict[name] = 0
+		self._autodict[name] += 1
+		return name % self._autodict[name]
+
+	def IntegerPrimaryKey(self,name):
+		return Column(name, Integer, Sequence(self.autoName("id_seq_%s"), optional=True), primary_key=True)
+
+	def UniqueString(self,name,length=80,index=True, nullable=False):
+		return Column(name, String(length), unique=True, index=index, nullable=nullable)
+
+	def associate(self,engine="sqlite:///:memory:"):
+		self.engine = create_engine(engine)
+		self.metadata.bind = self.engine
+		self.initORM()
+		self.initSession()
+		self.createDatabaseTables()
+
+	def initORM(self):
+		for cls in self._dbobj:
+			cls._makeTable(self)
+		for cls in self._dbobj:
+			cls._mapTable(self)
+
+	def createDatabaseTables(self):
+		self.metadata.create_all()
+
+	def initSession(self):
+		self.sessionmaker = sessionmaker(bind=self.engine, class_=None)
+
+	@property
+	def session(self):
+		if self.sessionmaker == None:
+			raise DatabaseError("Database not associated with engine")
+		if self._session == None:
+			self._session = scoped_session(self.sessionmaker)
+		return self._session
+
diff --git a/targets/gentoo/steps/stage.spec b/targets/gentoo/steps/stage.spec
index 0bf1bd70..afa28de3 100644
--- a/targets/gentoo/steps/stage.spec
+++ b/targets/gentoo/steps/stage.spec
@@ -186,7 +186,7 @@ then
 else
 	# stage1 - make sure we include our make.conf and profile link...
 	pf=""
-	pf=$[profile/format:zap]
+	pf="$[profile/format:zap]"
 	rm -f $ROOT/etc/make.conf $ROOT/etc/portage/make.conf
 	if [ -e /etc/make.conf ]; then
 		mkconf=/etc/make.conf
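For orientation, this is the pattern scripts/buildrepo follows when it uses db.py: a dbobject subclass supplies a _makeTable() classmethod that builds its Table, the inherited _mapTable() maps the class onto it, and a Database instance owns the engine, metadata and session. The sketch below is illustrative only; the Widget class and the in-memory engine are assumptions, and it presumes the same python2/SQLAlchemy environment these scripts already target:

#!/usr/bin/python2
# Minimal sketch of the db.py pattern; "Widget" and the in-memory engine are
# illustrative assumptions, not part of this change.
from db import *  # brings in dbobject, Database and the SQLAlchemy names

class Widget(dbobject):
	@classmethod
	def _makeTable(cls, db):
		cls.db = db
		cls.__table__ = Table('widget', db.metadata,
			Column('id', Integer, primary_key=True),
			Column('name', String, index=True),
		)

db = Database([Widget])
db.associate("sqlite:///:memory:")  # creates engine, mappers, session factory and tables
session = db.session

w = Widget()
w.name = "example"
session.add(w)
session.commit()

for w in session.query(Widget).order_by(Widget.__table__.c.name):
	print w.name

SubArch, BuildDir and Snapshot in scripts/buildrepo are declared and queried in exactly this way, against the sqlite:///cleaner.db engine instead of an in-memory one.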