From: Marcin Krol Date: Mon, 9 Jul 2012 21:29:57 +0000 (+0200) Subject: - from https://github.com/pld-linux/pld-builder.new X-Git-Url: https://git.tld-linux.org/?a=commitdiff_plain;h=90809c8fec988489786ce00247d9a4150070748b;p=TLD.git - from https://github.com/pld-linux/pld-builder.new --- diff --git a/pld-builder.new/.cvsignore b/pld-builder.new/.cvsignore new file mode 100644 index 0000000..2b41d7a --- /dev/null +++ b/pld-builder.new/.cvsignore @@ -0,0 +1,5 @@ +*.pyc +lock +log +spool +www diff --git a/pld-builder.new/Makefile b/pld-builder.new/Makefile new file mode 100644 index 0000000..e82dc21 --- /dev/null +++ b/pld-builder.new/Makefile @@ -0,0 +1,22 @@ +PACKAGE := pld-builder +VERSION := 0.5 +SNAP := $(shell date +%Y%m%d) + +# for make dist +CVSROOT := :pserver:cvs@cvs.pld-linux.org:/cvsroot +CVSMODULE := pld-builder.new +CVSTAG := HEAD + +all: + python -c "import compileall; compileall.compile_dir('.')" + +clean: + find -name '*.pyc' | xargs rm -f + +dist: + rm -rf $(PACKAGE)-$(VERSION).$(SNAP) + mkdir -p $(PACKAGE)-$(VERSION).$(SNAP) + cvs -d $(CVSROOT) export -d $(PACKAGE)-$(VERSION).$(SNAP) -r $(CVSTAG) $(CVSMODULE) + tar -cjf $(PACKAGE)-$(VERSION).$(SNAP).tar.bz2 $(PACKAGE)-$(VERSION).$(SNAP) + rm -rf $(PACKAGE)-$(VERSION).$(SNAP) + test -x ./dropin && ./dropin $(PACKAGE)-$(VERSION).$(SNAP).tar.bz2 diff --git a/pld-builder.new/PLD_Builder/.cvsignore b/pld-builder.new/PLD_Builder/.cvsignore new file mode 100644 index 0000000..0d20b64 --- /dev/null +++ b/pld-builder.new/PLD_Builder/.cvsignore @@ -0,0 +1 @@ +*.pyc diff --git a/pld-builder.new/PLD_Builder/__init__.py b/pld-builder.new/PLD_Builder/__init__.py new file mode 100644 index 0000000..1bb8bf6 --- /dev/null +++ b/pld-builder.new/PLD_Builder/__init__.py @@ -0,0 +1 @@ +# empty diff --git a/pld-builder.new/PLD_Builder/acl.py b/pld-builder.new/PLD_Builder/acl.py new file mode 100644 index 0000000..a769d06 --- /dev/null +++ b/pld-builder.new/PLD_Builder/acl.py @@ -0,0 +1,143 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import ConfigParser +import string +import fnmatch +import os +import stat + +import path +import log +import status +from mailer import Message +from config import config + +class User: + def __init__(self, p, login): + self.login = login + self.privs = [] + self.gpg_emails = [] + self.mailto = "" + + if p.has_option(login, "gpg_emails"): + self.gpg_emails = string.split(p.get(login, "gpg_emails")) + else: + log.panic("acl: [%s] has no gpg_emails" % login) + + if p.has_option(login, "mailto"): + self.mailto = p.get(login, "mailto") + else: + if len(self.gpg_emails) > 0: + self.mailto = self.gpg_emails[0] + + if p.has_option(login, "privs"): + for p in string.split(p.get(login, "privs")): + l = string.split(p, ":") + if len(l) == 2: + p+=":*" + if len(l) not in (2,3) or l[0] == "" or l[1] == "": + log.panic("acl: invalid priv format: '%s' [%s]" % (p, login)) + else: + self.privs.append(p) + else: + log.panic("acl: [%s] has no privs" % login) + + def can_do(self, what, where, branch=None): + if branch: + action = "%s:%s:%s" % (what, where, branch) + else: + action = "%s:%s:N-A" % (what, where) + for priv in self.privs: + if priv[0] == "!": + ret = 0 + priv = priv[1:] + else: + ret = 1 + pwhat,pwhere,pbranch=priv.split(":") + for pbranch in pbranch.split(","): + priv="%s:%s:%s" % (pwhat,pwhere,pbranch) + if fnmatch.fnmatch(action, priv): + return ret + return 0 + + def check_priority(self, prio, where): + for priv in self.privs: + val,builder=priv.split(":")[0:2] + if fnmatch.fnmatch(where, builder): + try: + 
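+                    # privs with a numeric first field (e.g. "2:*") act as a
+                    # floor on the requested priority: anything below the
+                    # permitted value is bumped up to it on matching builders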
+                    val=int(val)
+                except ValueError:
+                    continue
+                if prio>=val:
+                    return prio
+                else:
+                    return val
+        return prio
+
+    def mail_to(self):
+        return self.mailto
+
+    def message_to(self):
+        m = Message()
+        m.set_headers(to = self.mail_to(), cc = config.builder_list)
+        return m
+
+    def get_login(self):
+        return self.login
+
+class ACL_Conf:
+    def __init__(self):
+        self.reload()
+
+    def try_reload(self):
+        mtime = os.stat(path.acl_conf)[stat.ST_MTIME]
+        if mtime != self.acl_conf_mtime:
+            log.notice("acl.conf has changed, reloading...")
+            self.reload()
+            return True
+        return False
+
+    def reload(self):
+        self.acl_conf_mtime = os.stat(path.acl_conf)[stat.ST_MTIME]
+        self.current_user = None
+        status.push("reading acl.conf")
+        p = ConfigParser.ConfigParser()
+        p.readfp(open(path.acl_conf))
+        self.users = {}
+        for login in p.sections():
+            if self.users.has_key(login):
+                log.panic("acl: duplicate login: %s" % login)
+                continue
+            user = User(p, login)
+            for e in user.gpg_emails:
+                if self.users.has_key(e):
+                    log.panic("acl: user email collision %s <-> %s" % \
+                        (self.users[e].login, login))
+                else:
+                    self.users[e] = user
+            self.users[login] = user
+        status.pop()
+
+    def user_by_email(self, ems):
+        for e in ems:
+            if self.users.has_key(e):
+                return self.users[e]
+        return None
+
+    def user(self, l):
+        if not self.users.has_key(l):
+            log.panic("no such user: %s" % l)
+        return self.users[l]
+
+    def set_current_user(self, u):
+        self.current_user = u
+        if u != None:
+            status.email = u.mail_to()
+
+    def current_user_login(self):
+        if self.current_user != None:
+            return self.current_user.login
+        else:
+            return ""
+
+acl = ACL_Conf()
diff --git a/pld-builder.new/PLD_Builder/bqueue.py b/pld-builder.new/PLD_Builder/bqueue.py
new file mode 100644
index 0000000..aff32e9
--- /dev/null
+++ b/pld-builder.new/PLD_Builder/bqueue.py
@@ -0,0 +1,143 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import re
+import gzip
+import time
+import StringIO
+import os
+import fcntl
+import string
+import tempfile
+
+# PLD_Builder:
+import gpg
+import request
+import util
+import log
+
+class B_Queue:
+    def __init__(self, filename):
+        self.name = filename
+        self.requests = []
+        self.fd = None
+
+    def dump(self, fname):
+        (fdno, tmpfname) = tempfile.mkstemp(dir=os.path.dirname(fname))
+        f = os.fdopen(fdno, "w")
+        self.requests.reverse()
+        for r in self.requests:
+            r.dump(f)
+        self.requests.reverse()
+        f.flush()
+        os.fsync(f.fileno())
+        f.close()
+        os.chmod(tmpfname, 0644)
+        os.rename(tmpfname, fname)
+
+    def dump_html(self, fname):
+        (fdno, tmpfname) = tempfile.mkstemp(dir=os.path.dirname(fname))
+        f = os.fdopen(fdno, "w")
+        f.write("""<html>
+<head>
+  <title>PLD builder queue</title>
+</head>
+<body>
+\n"""
+        )
+        self.requests.reverse()
+        for r in self.requests:
+            r.dump_html(f)
+        self.requests.reverse()
+        f.write("</body></html>\n")
+        f.flush()
+        os.fsync(f.fileno())
+        f.close()
+        os.chmod(tmpfname, 0644)
+        os.rename(tmpfname, fname)
+
+    # read possibly compressed, signed queue
+    def read_signed(self):
+        if re.search(r"\.gz$", self.name):
+            f = gzip.open(self.name)
+        else:
+            f = open(self.name)
+        (signers, body) = gpg.verify_sig(f.read())
+        self.signers = signers
+        self.requests = request.parse_requests(body)
+
+    def _open(self):
+        if self.fd == None:
+            if os.access(self.name, os.F_OK):
+                self.fd = open(self.name, "r+")
+            else:
+                self.fd = open(self.name, "w+")
+
+    def read(self):
+        self._open()
+        self.signers = []
+        body = self.fd.read()
+        if string.strip(body) == "":
+            # empty file, don't choke
+            self.requests = []
+            return
+        try:
+            self.requests = request.parse_requests(body)
+        except Exception, e:
+            log.panic("error parsing %s: %s" % (self.name, e))
+            pass
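+
+    # Typical unsigned round trip (a sketch; the argument values are
+    # placeholders):
+    #   q = B_Queue(path.queue_file)
+    #   q.lock(0)          # blocking flock on the queue file
+    #   q.read()
+    #   q.add(req)
+    #   q.write()
+    #   q.unlock()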
+
+    def _write_to(self, f):
+        f.write("<queue>\n")
+        for r in self.requests:
+            r.write_to(f)
+        f.write("</queue>\n")
+
+    def write(self):
+        self._open()
+        self.fd.seek(0)
+        self.fd.truncate(0)
+        self._write_to(self.fd)
+        self.fd.flush()
+
+    def lock(self, no_block):
+        self._open()
+        op = fcntl.LOCK_EX
+        if no_block:
+            op = op + fcntl.LOCK_NB
+        try:
+            fcntl.flock(self.fd, op)
+            return 1
+        except IOError:
+            return 0
+
+    def unlock(self):
+        fcntl.flock(self.fd, fcntl.LOCK_UN)
+
+    def write_signed(self, name):
+        sio = StringIO.StringIO()
+        self._write_to(sio)
+        sio.seek(0)
+        sio.write(gpg.sign(sio.read()))
+        sio.seek(0)
+        (fdno, tmpname) = tempfile.mkstemp(dir=os.path.dirname(name))
+        f = os.fdopen(fdno, "w")
+        if re.search(r"\.gz$", name):
+            fgz = gzip.GzipFile(filename=name, mode="w", compresslevel=6, fileobj=f)
+            util.sendfile(sio, fgz)
+            fgz.close()
+        else:
+            util.sendfile(sio, f)
+        f.flush()
+        os.fsync(f.fileno())
+        f.close()
+        os.chmod(tmpname, 0644)
+        os.rename(tmpname, name)
+
+    def add(self, req):
+        self.requests.append(req)
+
+    def value(self):
+        return self.requests
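+
+# How the two signed-queue entry points pair up (a sketch; the path is a
+# placeholder, cf. req_queue_signed_file in path.py):
+#   q = B_Queue("www/queue.gz")
+#   q.write_signed("www/queue.gz")  # clearsign + gzip, atomic via rename
+# and on the consuming side:
+#   q.read_signed()                 # gunzip + GPG-verify, fills
+#                                   # q.signers and q.requests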
me.") + batch.build_failed = 0 + batch.log_line("queued command %s for other builders" % batch.command) + r.some_ok = 1 + buildlogs.add(batch.logfile, failed = batch.build_failed, id=r.id) + elif can_build: + log.notice("building %s" % batch.spec) + stopwatch.start() + batch.logfile = tmp + batch.spec + ".log" + batch.gb_id=r.id + batch.requester=r.requester + batch.requester_email=r.requester_email + batch.build_failed = build_fnc(r, batch) + if batch.build_failed: + log.notice("building %s FAILED (%s)" % (batch.spec, batch.build_failed)) + notify.add_batch(batch, batch.build_failed) + else: + r.some_ok = 1 + log.notice("building %s OK" % (batch.spec)) + notify.add_batch(batch, "OK") + batch.build_time = stopwatch.stop() + report.add_pld_builder_info(batch) + buildlogs.add(batch.logfile, failed = batch.build_failed, id=r.id) + else: + batch.build_failed = 1 + batch.skip_reason = "SKIPED [%s failed]" % failed_dep + batch.logfile = None + batch.build_time = "" + log.notice("building %s %s" % (batch.spec, batch.skip_reason)) + notify.add_batch(batch, "SKIP") + + buildlogs.flush() + chroot.run("rm -f %s" % string.join(r.chroot_files)) diff --git a/pld-builder.new/PLD_Builder/buildlogs.py b/pld-builder.new/PLD_Builder/buildlogs.py new file mode 100644 index 0000000..0bcd530 --- /dev/null +++ b/pld-builder.new/PLD_Builder/buildlogs.py @@ -0,0 +1,69 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import path +import time +import os +import re +import log + +from config import config +import util + +class Buildlogs_Queue: + def __init__(self): + self.queue = [] + self.some_failed = 0 + self.requester_email = None + + def init(self, g): + self.requester_email = g.requester_email + + def add(self, logfile, failed, id): + # if /dev/null, don't even bother to store it + if config.buildlogs_url == "/dev/null": + return + blogfile = os.path.basename(logfile) + name = re.sub(r"\.spec\.log", "", blogfile) + "," + id + ".bz2" + ret = os.system("bzip2 --best --force < %s > %s" \ + % (logfile, path.buildlogs_queue_dir + '/' + config.builder + '.' + id + '.' + blogfile)) + if ret: + log.error("bzip2 compression of %s failed; does bzip2 binary exist?" % (logfile)) + + if failed: s = "FAIL" + else: s = "OK" + f = open(path.buildlogs_queue_dir + '/' + config.builder + '.' + id + '.' + blogfile + ".info", "w") + f.write("Status: %s\nEND\n" % s) + f.close() + + self.queue.append({'name': name, 'id': config.builder + '.' + id + '.' + blogfile, 'failed': failed}) + + def flush(self): + def desc(l): + return """Target: %s/%s +Builder: %s +Time: %d +Type: buildlog +Requester: %s +END +""" % (config.buildlogs_url, l['name'], config.builder, time.time(), self.requester_email) + + q = self.queue[:] + for l in q: + f = open(path.buildlogs_queue_dir + '/' + l['id'] + ".desc.tmp", "w") + f.write(desc(l)) + f.close() + os.rename(path.buildlogs_queue_dir + '/' + l['id'] + ".desc.tmp", path.buildlogs_queue_dir + '/' + l['id'] + ".desc") + self.queue.remove(l) + +queue = Buildlogs_Queue() + +def init(r): + queue.init(r) + +def add(logfile, failed, id): + "Add new buildlog with specified status." + queue.add(logfile, failed, id) + +def flush(): + "Send buildlogs to server." 
+ queue.flush() diff --git a/pld-builder.new/PLD_Builder/chroot.py b/pld-builder.new/PLD_Builder/chroot.py new file mode 100644 index 0000000..f099d01 --- /dev/null +++ b/pld-builder.new/PLD_Builder/chroot.py @@ -0,0 +1,83 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import os +import re +import random + +try: + from hashlib import md5 as md5 +except ImportError: + from md5 import md5 + +from config import config + +def quote(cmd): + return re.sub("([\"\\\\$`])", r"\\\1", cmd) + +def command(cmd, user = None, nostdin=""): + if user == None: + user = config.builder_user + if nostdin: + nostdin = "exec < /dev/null; " + return "%s sudo chroot %s su - %s -c \"export LC_ALL=C; %s %s\"" \ + % (config.sudo_chroot_wrapper, config.chroot, user, nostdin, quote(cmd)) + +def command_sh(cmd): + return "%s sudo chroot %s /bin/sh -c \"export LC_ALL=C; exec < /dev/null; %s\"" \ + % (config.sudo_chroot_wrapper, config.chroot, quote(cmd)) + +def popen(cmd, user = "builder", mode = "r"): + f = os.popen(command(cmd, user), mode) + return f + +def run(cmd, user = "builder", logfile = None, logstdout = None): + c = command(cmd, user, nostdin=True) + if logfile != None: + if logstdout != None: + c = "%s 2>&1 | /usr/bin/tee -a %s" % (c, logfile) + else: + c = "%s >> %s 2>&1" % (c, logfile) + f = os.popen(c) + if logstdout != None: + for l in f: + logstdout.write(l) + r = f.close() + if r == None: + return 0 + else: + return r + +def cp(file, outfile, user="builder", rm=False): + m = md5() + m.update(str(random.sample(xrange(100000), 500))) + digest = m.hexdigest() + + marker_start = "--- FILE BEGIN DIGEST %s ---" % digest + marker_end = "--- FILE END DIGEST %s ---" % digest + + f = open(outfile, 'wb') + cmd = "echo \"%s\"; cat %s; echo \"%s\"" % (marker_start, file, marker_end) + if rm: + cmd += "; rm %s" % file + c = command(cmd, user) + p = os.popen(c) + # get file contents + marker = False + for l in p: + if not marker and l.strip() == marker_start: + marker = True + continue + me = l.find(marker_end) + if me != -1: + l = l[:me] + f.write(l) + marker = False + break + if marker: + f.write(l) + rp = p.close() + rf = f.close() + if rp == None: + return 0 + else: + return rp diff --git a/pld-builder.new/PLD_Builder/cia-foot.xml b/pld-builder.new/PLD_Builder/cia-foot.xml new file mode 100644 index 0000000..18418fe --- /dev/null +++ b/pld-builder.new/PLD_Builder/cia-foot.xml @@ -0,0 +1,4 @@ + + + + diff --git a/pld-builder.new/PLD_Builder/cia-head.xml b/pld-builder.new/PLD_Builder/cia-head.xml new file mode 100644 index 0000000..2bf3ac0 --- /dev/null +++ b/pld-builder.new/PLD_Builder/cia-head.xml @@ -0,0 +1,13 @@ + + + PLD Linux Builder client for CIA + 0.001 + http://cvs.pld-linux.org/pld-builder.new/PLD_Builder/report.py + + + pld + Ac + + + + diff --git a/pld-builder.new/PLD_Builder/config.py b/pld-builder.new/PLD_Builder/config.py new file mode 100644 index 0000000..2e78cd8 --- /dev/null +++ b/pld-builder.new/PLD_Builder/config.py @@ -0,0 +1,124 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import ConfigParser +import string +import os +import syslog + +import path +import log +import status + + +syslog_facilities = { + 'kern': syslog.LOG_KERN, + 'user': syslog.LOG_USER, + 'mail': syslog.LOG_MAIL, + 'daemon': syslog.LOG_DAEMON, + 'auth': syslog.LOG_AUTH, + 'lpr': syslog.LOG_LPR, + 'news': syslog.LOG_NEWS, + 'uucp': syslog.LOG_UUCP, + 'cron': syslog.LOG_CRON, + 'local0': syslog.LOG_LOCAL0, + 'local1': syslog.LOG_LOCAL1, + 'local2': syslog.LOG_LOCAL2, + 'local3': syslog.LOG_LOCAL3, + 'local4': 
syslog.LOG_LOCAL4, + 'local5': syslog.LOG_LOCAL5, + 'local6': syslog.LOG_LOCAL6, + 'local7': syslog.LOG_LOCAL7 +} + +class Builder_Conf: + def __init__(self): + self.done = 0 + pass + + def read(self, builder): + p = ConfigParser.ConfigParser() + def get(o, d = None): + if p.has_option(builder, o): + return string.strip(p.get(builder, o)) + elif p.has_option("all", o): + return string.strip(p.get("all", o)) + elif d != None: + return d + else: + log.panic("cannot find %s::%s" % (builder, o)) + + p.readfp(open(path.builder_conf)) + + self.admin_email = get("admin_email") + self.email = self.admin_email + + if p.has_option("all", "syslog"): + f = p.get("all", "syslog") + if f != "": + if syslog_facilities.has_key(f): + log.open_syslog("builder", syslog_facilities[f]) + else: + log.panic("no such syslog facility: %s" % f) + + if builder == "src": + builder = get("src_builder", builder) + self.builder = builder + + self.binary_builders = string.split(get("binary_builders")) + self.src_builder = string.strip(get("src_builder", "")) + self.tag_prefixes = string.split(get("tag_prefixes", "")) + self.max_keep_time = int(get("max_keep_time", 168))*60*60 + self.bot_email = get("bot_email", "") + self.control_url = get("control_url") + self.request_handler_server_port = int(get("request_handler_server_port", 1234)) + self.builder_list = get("builder_list", "") + self.gen_upinfo = get("gen_upinfo", "yes") + if self.gen_upinfo == 'no': + self.gen_upinfo = False + else: + self.gen_upinfo = True + status.admin = self.admin_email + status.builder_list = self.builder_list + self.max_jobs = int(get("max_jobs")) + + if builder == "all": + return + + self.email = get("email") + if builder not in p.sections(): + log.panic("builder %s not in config file" % builder) + self.arch = get("arch") + self.chroot = get("chroot") + self.buildlogs_url = get("buildlogs_url", "/dev/null") + self.ftp_url = get("ftp_url") + self.notify_url = get("notify_url") + self.test_ftp_url = get("test_ftp_url", "/dev/null") + self.rpmqa_url = get("rpmqa_url", "/dev/null") + self.rpmqa_filename = get("rpmqa_filename") + self.job_slots = int(get("job_slots")) + self.max_load = float(get("max_load")) + self.rpm_cache_dir = get("rpm_cache_dir", "/spools/ready") + self.builder_user = get("builder_user", "builder") + self.sudo_chroot_wrapper = get("sudo_chroot_wrapper", "") + self.nice = get("nice", "0") + + f = get("syslog", "") + if f != "": + if syslog_facilities.has_key(f): + log.open_syslog(self.builder, syslog_facilities[f]) + else: + log.panic("no such syslog facility: %s" % f) + + self.done = 1 + +config = Builder_Conf() + +def init_conf(builder=None): + os.environ['LC_ALL'] = "C" + status.push("reading builder config") + log.builder = builder + if not builder: + builder = "all" + config.read(builder) + log.builder = config.builder + status.pop() diff --git a/pld-builder.new/PLD_Builder/deps.py b/pld-builder.new/PLD_Builder/deps.py new file mode 100644 index 0000000..404b25c --- /dev/null +++ b/pld-builder.new/PLD_Builder/deps.py @@ -0,0 +1,121 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import string +from chroot import * +from util import * + +__all__ = ['compute_deps', 'remove_list'] + +def compute_deps(): + """Compute dependenecies between RPM installed on system. + + Return dictionary from name of package to list of packages required by it. + Produce some warnings and progress information to stderr. + """ + # pkg-name -> list of stuff returned by rpm -qR + rpm_req = {} + # --whatprovides ... 
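+    # illustrative shapes of the maps filled by the scan passes below
+    # (package names are made up):
+    #   rpm_req  == {'foo': ['libbar.so.1', '/bin/sh']}
+    #   rpm_prov == {'libbar.so.1': 'bar', '/bin/sh': 'bash'}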
+ rpm_prov = {} + # list of required files + req_files = {} + + def get_req(): + msg("rpm-req... ") + f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{REQUIRENAME}\n]'") + cur_pkg = None + while 1: + l = f.readline() + if l == "": break + l = string.strip(l) + if l == "@": + cur_pkg = string.strip(f.readline()) + rpm_req[cur_pkg] = [] + continue + rpm_req[cur_pkg].append(l) + if l[0] == '/': + req_files[l] = 1 + f.close() + msg("done\n") + + def add_provides(pkg, what): + if rpm_prov.has_key(what): + msg("[%s: %s, %s] " % (what, rpm_prov[what], pkg)) + else: + rpm_prov[what] = pkg + + def get_prov(): + msg("rpm-prov... ") + f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{PROVIDENAME}\n]'") + cur_pkg = None + while 1: + l = f.readline() + if l == "": break + l = string.strip(l) + if l == "@": + cur_pkg = string.strip(f.readline()) + continue + add_provides(cur_pkg, l) + if l[0] == '/': + # already provided + del req_files[l] + f.close() + msg("done\n") + + def get_prov_files(): + msg("rpm-files... ") + f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{FILENAMES}\n]'") + cur_pkg = None + while 1: + l = f.readline() + if l == "": break + l = string.strip(l) + if l == "@": + cur_pkg = string.strip(f.readline()) + continue + if req_files.has_key(l): + add_provides(cur_pkg, l) + f.close() + msg("done\n") + + def compute(): + msg("computing deps... ") + for pkg, reqs in rpm_req.items(): + pkg_reqs = [] + for req in reqs: + if req[0:7] == "rpmlib(": continue + if rpm_prov.has_key(req): + if rpm_prov[req] not in pkg_reqs: + pkg_reqs.append(rpm_prov[req]) + else: + msg("[%s: %s] " % (pkg, req)) + requires[pkg] = pkg_reqs + msg("done\n") + + # map from pkg-name to list of pkg-names required by it + # this is result + requires = {} + + get_req() + get_prov() + get_prov_files() + compute() + return requires + +def remove_list(req, need): + """List of packages scheduled for removal. + + Given dependency information and list of needed packages compute list + of packages that don't need to be present. 
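+
+    Illustrative example (made-up package names):
+        req  = {'a': ['b'], 'b': [], 'c': []}
+        remove_list(req, ['a'])  =>  ['c']    # 'a' pulls in 'b', both stay
+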
+ """ + need_m = {} + def close(n): + if need_m.has_key(n): return + need_m[n] = 1 + if not req.has_key(n): return + for k in req[n]: + close(k) + for n in need: close(n) + rm = [] + for p in req.keys(): + if not need_m.has_key(p): rm.append(p) + return rm diff --git a/pld-builder.new/PLD_Builder/file_sender.py b/pld-builder.new/PLD_Builder/file_sender.py new file mode 100644 index 0000000..f8d41f3 --- /dev/null +++ b/pld-builder.new/PLD_Builder/file_sender.py @@ -0,0 +1,222 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import glob +import re +import string +import os +import time +import shutil +import sys +import traceback +import urllib2 + +from config import config, init_conf +import mailer +import path +import log +import loop +import status +import lock + +retries_times = [5 * 60, 5 * 60, 10 * 60, 10 * 60, 30 * 60, 60 * 60] + +def read_name_val(file): + f = open(file) + r = {'_file': file[:-5], '_desc': file} + rx = re.compile(r"^([^:]+)\s*:(.*)$") + for l in f.xreadlines(): + if l == "END\n": + f.close() + return r + m = rx.search(l) + if m: + r[m.group(1)] = string.strip(m.group(2)) + else: + break + f.close() + return None + +def scp_file(src, target): + global problems + f = os.popen("scp -v -B %s %s 2>&1 < /dev/null" % (src, target)) + p = f.read() + ret = f.close() + if ret: + problems[src] = p + return ret + +def copy_file(src, target): + try: + shutil.copyfile(src, target) + return 0 + except: + global problems + exctype, value = sys.exc_info()[:2] + problems[src] = "cannot copy file: %s" % traceback.format_exception_only(exctype, value) + return 1 + +def rsync_file(src, target, host): + global problems + + p = open(path.rsync_password_file, "r") + password = "" + for l in p.xreadlines(): + l = string.split(l) + if len(l) >= 2 and l[0] == host: + password = l[1] + p.close() + + # NOTE: directing STDIN to /dev/null, does not make rsync to skip asking + # password, it opens /dev/tty and still asks if password is needed and + # missing, therefore we always set RSYNC_PASSWORD env var + os.environ["RSYNC_PASSWORD"] = password + rsync = "rsync --verbose --archive --timeout=360 --contimeout=360" + f = os.popen("%s %s %s 2>&1" % (rsync, src, target)) + p = f.read() + ret = f.close() + if ret: + problems[src] = p + del os.environ["RSYNC_PASSWORD"]; + return ret + +def rsync_ssh_file(src, target): + global problems + rsync = "rsync --verbose --archive --timeout=360 -e ssh" + f = os.popen("%s %s %s 2>&1 < /dev/null" % (rsync, src, target)) + p = f.read() + ret = f.close() + if ret: + problems[src] = p + return ret + +def post_file(src, url): + global problems + try: + f = open(src, 'r') + data = f.read() + f.close() + req = urllib2.Request(url, data) + req.add_header('X-Filename', os.path.basename(src)) + f = urllib2.urlopen(req) + f.close() + except Exception, e: + problems[src] = e + return e + return 0 + +def send_file(src, target): + global problems + try: + log.notice("sending %s to %s (size %d bytes)" % (src, target, os.stat(src).st_size)) + m = re.match('rsync://([^/]+)/.*', target) + if m: + return not rsync_file(src, target, host = m.group(1)) + if target != "" and target[0] == '/': + return not copy_file(src, target) + m = re.match('scp://([^@:]+@[^/:]+)(:|)(.*)', target) + if m: + return not scp_file(src, m.group(1) + ":" + m.group(3)) + m = re.match('ssh\+rsync://([^@:]+@[^/:]+)(:|)(.*)', target) + if m: + return not rsync_ssh_file(src, m.group(1) + ":" + m.group(3)) + m = re.match('http://.*', target) + if m: + return not post_file(src, target) + log.alert("unsupported 
protocol: %s" % target) + except OSError, e: + problems[src] = e + log.error("send_file(%s, %s): %s" % (src, target, e)) + return False + return True + +def maybe_flush_queue(dir): + retry_delay = 0 + try: + f = open(dir + "/retry-at") + last_retry = int(string.strip(f.readline())) + retry_delay = int(string.strip(f.readline())) + f.close() + if last_retry + retry_delay > time.time(): + return + os.unlink(dir + "/retry-at") + except: + pass + + status.push("flushing %s" % dir) + + if flush_queue(dir): + f = open(dir + "/retry-at", "w") + if retry_delay in retries_times: + idx = retries_times.index(retry_delay) + if idx < len(retries_times) - 1: idx += 1 + else: + idx = 0 + f.write("%d\n%d\n" % (time.time(), retries_times[idx])) + f.close() + + status.pop() + +def flush_queue(dir): + q = [] + os.chdir(dir) + for f in glob.glob(dir + "/*.desc"): + d = read_name_val(f) + if d != None: q.append(d) + def mycmp(x, y): + rc = cmp(x['Time'], y['Time']) + if rc == 0 and x.has_key('Type') and y.has_key('Type'): + return cmp(x['Type'], y['Type']) + else: + return rc + q.sort(mycmp) + + error = None + # copy of q + remaining = q[:] + for d in q: + if not send_file(d['_file'], d['Target']): + error = d + continue + if os.access(d['_file'] + ".info", os.F_OK): + if not send_file(d['_file'] + ".info", d['Target'] + ".info"): + error = d + continue + os.unlink(d['_file'] + ".info") + os.unlink(d['_file']) + os.unlink(d['_desc']) + remaining.remove(d) + + if error != None: + emails = {} + emails[config.admin_email] = 1 + pr = "" + for src, msg in problems.iteritems(): + pr = pr + "[src: %s]\n\n%s\n" % (src, msg) + for d in remaining: + if d.has_key('Requester'): + emails[d['Requester']] = 1 + e = emails.keys() + m = mailer.Message() + m.set_headers(to = string.join(e, ", "), + subject = "[%s] builder queue problem" % config.builder) + m.write("there were problems sending files from queue %s:\n" % dir) + m.write("problems:\n") + m.write("%s\n" % pr) + m.send() + log.error("error sending files from %s:\n%s\n" % (dir, pr)) + return 1 + + return 0 + +problems = {} + +def main(): + if lock.lock("sending-files", non_block = 1) == None: + return + init_conf() + maybe_flush_queue(path.notify_queue_dir) + maybe_flush_queue(path.buildlogs_queue_dir) + maybe_flush_queue(path.ftp_queue_dir) + +if __name__ == '__main__': + loop.run_loop(main) diff --git a/pld-builder.new/PLD_Builder/ftp.py b/pld-builder.new/PLD_Builder/ftp.py new file mode 100644 index 0000000..0d13be6 --- /dev/null +++ b/pld-builder.new/PLD_Builder/ftp.py @@ -0,0 +1,77 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import path +import os +import shutil +import time + +from config import config +import util + +class FTP_Queue: + def __init__(self): + self.queue = None + self.some_failed = 0 + self.status = "" + + def init(self, g=None, rpmqa=False): + self.queue = [] + if rpmqa: + self.requester_email = config.admin_email + self.url = config.rpmqa_url + else: + self.requester_email = g.requester_email + if "test-build" in g.flags: + self.url = config.test_ftp_url + else: + self.url = config.ftp_url + + def add(self, file, type): + # if /dev/null, say bye bye + if self.url == "/dev/null": + return + name = os.path.basename(file) + id = util.uuid() + shutil.copy(file, path.ftp_queue_dir + '/' + id) + self.queue.append({'name': name, 'id': id, 'type': type}) + st = os.stat(path.ftp_queue_dir + '/' + id) + self.status += "%10d %s\n" % (st.st_size, name) + + def flush(self): + def desc(l): + return """Target: %s/%s +Builder: %s +Time: %d +Type: %s 
+Requester: %s +END +""" % (self.url, l['name'], config.builder, time.time(), l['type'], self.requester_email) + + for l in self.queue: + f = open(path.ftp_queue_dir + '/' + l['id'] + ".desc", "w") + f.write(desc(l)) + f.close() + + def kill(self): + for l in self.queue: + os.unlink(path.ftp_queue_dir + '/' + l) + +queue = FTP_Queue() + +def add(f, type="rpm"): + queue.add(f, type) + +def flush(): + queue.flush() + +def kill(): + queue.kill() + +def init(r=None, rpmqa=False): + queue.init(r, rpmqa) + +def status(): + return queue.status + +def clear_status(): + queue.status = "" diff --git a/pld-builder.new/PLD_Builder/get_br.py b/pld-builder.new/PLD_Builder/get_br.py new file mode 100644 index 0000000..1f69394 --- /dev/null +++ b/pld-builder.new/PLD_Builder/get_br.py @@ -0,0 +1,103 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import re +import string +import xreadlines +from util import * + + +def get_build_requires(spec, bconds_with, bconds_without): + cond_rx = re.compile(r"%\{(\!\?|\?\!|\?)([a-zA-Z0-9_+]+)\s*:([^%\{\}]*)\}") + + def expand_conds(l): + def expand_one(m): + if m.group(1) == "?": + if macros.has_key(m.group(2)): + return m.group(3) + else: + if not macros.has_key(m.group(2)): + return m.group(3) + return "" + + for i in range(10): + l = cond_rx.sub(expand_one, l) + if len(l) > 1000: break + + return l + + macro_rx = re.compile(r"%\{([a-zA-Z0-9_+]+)\}") + def expand_macros(l): + def expand_one(m): + if macros.has_key(m.group(1)): + return string.strip(macros[m.group(1)]) + else: + return m.group(0) # don't change + + for i in range(10): + l = macro_rx.sub(expand_one, l) + if len(l) > 1000: break + + return expand_conds(l) + + simple_br_rx = re.compile(r"^BuildRequires\s*:\s*([^\s]+)", re.I) + bcond_rx = re.compile(r"^%bcond_(with|without)\s+([^\s]+)") + version_rx = re.compile(r"^Version\s*:\s*([^\s]+)", re.I) + release_rx = re.compile(r"^Release\s*:\s*([^\s]+)", re.I) + name_rx = re.compile(r"^Name\s*:\s*([^\s]+)", re.I) + define_rx = re.compile(r"^\%define\s+([a-zA-Z0-9_+]+)\s+(.*)", re.I) + any_br_rx = re.compile(r"BuildRequires", re.I) + + macros = {} + for b in bconds_with: + macros["_with_%s" % b] = 1 + for b in bconds_without: + macros["_without_%s" % b] = 1 + + macros["__perl"] = "/usr/bin/perl" + macros["_bindir"] = "/usr/bin" + macros["_sbindir"] = "/usr/sbin" + macros["kgcc_package"] = "gcc" + + build_req = [] + + f = open(spec) + for l in xreadlines.xreadlines(f): + l = string.strip(l) + if l == "%changelog": break + + # %bcond_with.. 
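+            # e.g. "%bcond_without gui" enables with_gui unless the request
+            # set _without_gui, while "%bcond_with gui" enables it only when
+            # _with_gui was set (see the macro seeding above)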
+ m = bcond_rx.search(l) + if m: + bcond = m.group(2) + if m.group(1) == "with": + if macros.has_key("_with_%s" % bcond): + macros["with_%s" % bcond] = 1 + else: + if not macros.has_key("_without_%s" % bcond): + macros["with_%s" % bcond] = 1 + continue + + # name,version,release + m = version_rx.search(l) + if m: macros["version"] = m.group(1) + m = release_rx.search(l) + if m: macros["release"] = m.group(1) + m = name_rx.search(l) + if m: macros["name"] = m.group(1) + + # %define + m = define_rx.search(l) + if m: macros[m.group(1)] = m.group(2) + + # *BuildRequires* + if any_br_rx.search(l): + l = expand_macros(l) + m = simple_br_rx.search(l) + if m: + build_req.append(m.group(1)) + else: + if l <> "" and l[0] <> '#': + msg("spec error (%s): %s\n" % (spec, l)) + + for x in build_req: + print x diff --git a/pld-builder.new/PLD_Builder/gpg.py b/pld-builder.new/PLD_Builder/gpg.py new file mode 100644 index 0000000..5c5dbec --- /dev/null +++ b/pld-builder.new/PLD_Builder/gpg.py @@ -0,0 +1,88 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import log +import subprocess +import re +import StringIO + +import util +import os +import pipeutil + +def get_keys(buf): + """Extract keys from gpg message + + """ + + if not os.path.isfile('/usr/bin/gpg'): + log.error("missing gnupg binary: /usr/bin/gpg") + raise OSError, 'Missing gnupg binary' + + d_stdout = None + d_stderr = None + cmd = ['/usr/bin/gpg', '--batch', '--no-tty', '--decrypt'] + gpg_run = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + try: + d_stdout, d_stderr = gpg_run.communicate(buf.encode('utf-8')) + except OSError, e: + log.error("gnupg run, does gpg binary exist? : %s" % e) + raise + + rx = re.compile("^gpg: Signature made .*using [DR]SA key ID (.+)") + keys = [] + + for l in d_stderr.split('\n'): + m = rx.match(l) + if m: + keys.append(m.group(1)) + + return keys + +def verify_sig(buf): + """Check signature. + + Given email as file-like object, return (signer-emails, signed-body). + where signer-emails is lists of strings, and signed-body is StringIO + object. + """ + + if not os.path.isfile('/usr/bin/gpg'): + log.error("missing gnupg binary: /usr/bin/gpg") + raise OSError, 'Missing gnupg binary' + + d_stdout = None + d_stderr = None + cmd = ['/usr/bin/gpg', '--batch', '--no-tty', '--decrypt'] + gpg_run = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + try: + d_stdout, d_stderr = gpg_run.communicate(buf.encode('utf-8')) + except OSError, e: + log.error("gnupg run failed, does gpg binary exist? : %s" % e) + raise + + rx = re.compile("^gpg: (Good signature from| aka) .*<([^>]+)>") + emails = [] + for l in d_stderr.split('\n'): + m = rx.match(l) + if m: + emails.append(m.group(2)) + if not emails: + log.error("gnupg signature check failed: %s" % d_stderr) + return (emails, d_stdout) + +def sign(buf): + if not os.path.isfile('/usr/bin/gpg'): + log.error("missing gnupg binary: /usr/bin/gpg") + raise OSError, 'Missing gnupg binary' + + d_stdout = None + d_stderr = None + cmd = ['/usr/bin/gpg', '--batch', '--no-tty', '--clearsign'] + gpg_run = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) + try: + d_stdout, d_stderr = gpg_run.communicate(buf.encode('utf-8')) + except OSError, e: + log.error("gnupg signing failed, does gpg binary exist? 
: %s" % e) + raise + + return d_stdout diff --git a/pld-builder.new/PLD_Builder/install.py b/pld-builder.new/PLD_Builder/install.py new file mode 100644 index 0000000..91cc889 --- /dev/null +++ b/pld-builder.new/PLD_Builder/install.py @@ -0,0 +1,202 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import re, os +import string +import StringIO + +import chroot +import util +import log + +hold = [ + 'dev', + 'poldek', + 'rpm-build', + 'pdksh', + 'coreutils' +] + +def close_killset(killset): + k = killset.keys() + if len(k) == 0: + return True + rx = re.compile(r'^.* marks (?P[^\s]+?)-[^-]+-[^-]+\s.*$') + errors = "" + for p in k: + if p in hold: + del killset[p] + errors += "cannot remove %s because it's crucial\n" % p + else: + f = chroot.popen("poldek --noask --test --test --erase %s" % p, user = "root") + crucial = 0 + e = [] + for l in f.xreadlines(): + m = rx.search(l) + if m: + pkg = m.group('name') + if pkg in hold: + errors += "cannot remove %s because it's required " \ + "by %s, that is crucial\n" % (p, pkg) + crucial = 1 + e.append(pkg) + f.close() + if crucial: + del killset[p] + else: + for p in e: + killset[p] = 2 + return errors + +def upgrade_from_batch(r, b): + f = chroot.popen("rpm --test -F %s 2>&1" % string.join(b.files), user = "root") + killset = {} + rx = re.compile(r' \(installed\) (?P[^\s]+)-[^-]+-[^-]+$') + for l in f.xreadlines(): + m = rx.search(l) + if m: killset[m.group('name')] = 1 + f.close() + if len(killset) != 0: + err = close_killset(killset) + if err != "": + util.append_to(b.logfile, err) + log.notice("cannot upgrade rpms") + return False + k = string.join(killset.keys()) + if True: + b.log_line("upgrade requires removal of %s" % k) + res = chroot.run("rpm -e %s" % k, logfile = b.logfile, user = "root") + if res != 0: + b.log_line("package removal failed") + return False + else: + b.log_line("packages removed sucessfuly") + else: + b.log_line("upgrade would need removal of %s" % k) + return False + b.log_line("upgrading packages") + logbuf = StringIO.StringIO() + res = chroot.run("rpm -Fvh %s" % string.join(b.files), user = "root", logfile = b.logfile) + if res != 0: + b.log_line("package upgrade failed") + logbuf.close() + return False + logbuf.close() + return True + +def uninstall(conflicting, b): + b.log_line("uninstalling conflicting packages") + err = close_killset(conflicting) + if err != "": + util.append_to(b.logfile, err) + b.log_line("error: conflicting packages uninstallation failed") + return False + else: + for k in conflicting.keys(): + b.log_line("removing %s" % k) + res = chroot.run("poldek --noask --erase %s" % k, logfile = b.logfile, user = "root") + if res != 0: + b.log_line("package %s removal failed" % k) + return True + +def uninstall_self_conflict(b): + b.log_line("checking BuildConflict-ing packages") + packagename = b.spec[:-5] + tmpdir = os.environ.get('HOME') + "/rpm/BUILD/%s/tmp" % packagename + f = chroot.popen("set -e; TMPDIR=%(tmpdir)s rpmbuild -bp --nobuild --short-circuit --define 'prep exit 0' %(rpmdefs)s rpm/packages/%(package)s/%(spec)s 2>&1" % { + 'tmpdir': tmpdir, + 'rpmdefs' : b.rpmbuild_opts(), + 'package' : packagename, + 'spec': b.spec, + }) + # java-sun >= 1.5 conflicts with soprano-2.1.67-1.src + # java-sun conflicts with soprano-2.1.67-1.src + rx = re.compile(r"\s+(?P[\w-]+)\s+.*conflicts with [^\s]+-[^-]+-[^-]+\.src($| .*)") + conflicting = {} + for l in f.xreadlines(): + m = rx.search(l) + if m: + b.log_line("rpmbuild: %s" % l.rstrip()) + conflicting[m.group('name')] = 1 + f.close() + if len(conflicting) and 
not uninstall(conflicting, b): + return False + b.log_line("no BuildConflicts found") + return True + +def install_br(r, b): + def get_missing_br(r, b): + # ignore internal rpm dependencies, see lib/rpmns.c for list + ignore_br = re.compile(r'^\s*(rpmlib|cpuinfo|getconf|uname|soname|user|group|mounted|diskspace|digest|gnupg|macro|envvar|running|sanitycheck|vcheck|signature|verify|exists|executable|readable|writable)\(.*') + + packagename = b.spec[:-5] + tmpdir = os.environ.get('HOME') + "/rpm/BUILD/%s/tmp" % packagename + chroot.run("install -m 700 -d %s" % tmpdir) + cmd = "set -e; TMPDIR=%(tmpdir)s rpmbuild --nobuild %(rpmdefs)s rpm/packages/%(package)s/%(spec)s 2>&1" % { + 'tmpdir': tmpdir, + 'rpmdefs' : b.rpmbuild_opts(), + 'package' : packagename, + 'spec': b.spec, + } + f = chroot.popen(cmd) + rx = re.compile(r"^\s*(?P[^\s]+) .*is needed by") + needed = {} + b.log_line("checking BR") + for l in f.xreadlines(): + b.log_line("rpm: %s" % l.rstrip()) + m = rx.search(l) + if m and not ignore_br.match(l): + needed[m.group('name')] = 1 + f.close() + return needed + + needed = get_missing_br(r, b); + + if len(needed) == 0: + b.log_line("no BR needed") + return True + + nbr = "" + for bre in needed.keys(): + nbr = nbr + " " + re.escape(bre) + br = string.strip(nbr) + b.log_line("updating poldek cache...") + chroot.run("poldek --up --upa", user = "root", logfile = b.logfile) + # check conflicts in BRed packages + b.log_line("checking conflicting packages in BRed packages") + f = chroot.popen("poldek --test --test --noask --caplookup -Q -v --upgrade %s" % br, user = "root") + # phonon-devel-4.3.1-1.i686 conflicts with qt4-phonon-devel-4.5.0-6.i686 + # jdbc-stdext >= 2.0 is required by installed java-struts-1.3.10-1.noarch + # jmx is needed by (installed) java-commons-modeler-2.0-1.noarch + rx = re.compile(r".*(conflicts with|is required by|is needed by)( installed| \(installed\)|) (?P[^\s]+)-[^-]+-[^-]+($| .*)") + conflicting = {} + for l in f.xreadlines(): + b.log_line("poldek: %s" % l.rstrip()) + m = rx.search(l) + if m: conflicting[m.group('name')] = 1 + f.close() + if len(conflicting) == 0: + b.log_line("no conflicts found") + else: + if not uninstall(conflicting, b): + return False + + # recheck BuildRequires since above uninstallation could remove some required deps + needed = get_missing_br(r, b); + + if len(needed) == 0: + b.log_line("no BR needed") + return True + + nbr = "" + for bre in needed.keys(): + nbr = nbr + " " + re.escape(bre) + br = string.strip(nbr) + + b.log_line("installing BR: %s" % br) + res = chroot.run("poldek --noask --caplookup -Q -v --upgrade %s" % br, + user = "root", + logfile = b.logfile) + if res != 0: + b.log_line("error: BR installation failed") + return False + return True diff --git a/pld-builder.new/PLD_Builder/load_balancer.py b/pld-builder.new/PLD_Builder/load_balancer.py new file mode 100644 index 0000000..2e182c8 --- /dev/null +++ b/pld-builder.new/PLD_Builder/load_balancer.py @@ -0,0 +1,73 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import string +import os +import time + +import path +import sys +import log +import status +import lock +import loop + +import rpm_builder + +from config import config, init_conf + +# return list of binary builders in fair-queue order +# it is determined based upon spool/got_lock file, which is also +# updated to be short +def builders_order(): + bs = {} + bl = [] + for b in config.binary_builders: + bs[b] = 0 + bl.append(b) + + lck = lock.lock("got-lock") + f = open(path.got_lock_file, "r+") + line_no = 0 + + for l in 
f.xreadlines(): + line_no += 1 + b = string.strip(l) + if bs.has_key(b): + bs[b] = line_no + else: + log.alert("found strange lock in got-lock: %s" % b) + + def mycmp(b1, b2): + return cmp(bs[b1], bs[b2]) + + bl.sort(mycmp) + + f.seek(0) + f.truncate(0) + for l in bl: f.write(l + "\n") + f.close() + lck.close() + + return bl + +def run_rpm_builder(b): + if os.fork() == 0: + return + else: + rpm_builder.main_for(b) + sys.exit(0) + +def main(): + init_conf() + for b in builders_order(): + run_rpm_builder(b) + # give builder some time to aquire lock + time.sleep(1) + # wait for children to die out + try: + while 1: os.wait() + except: + pass + +if __name__ == '__main__': + loop.run_loop(main) diff --git a/pld-builder.new/PLD_Builder/lock.py b/pld-builder.new/PLD_Builder/lock.py new file mode 100644 index 0000000..942cce5 --- /dev/null +++ b/pld-builder.new/PLD_Builder/lock.py @@ -0,0 +1,21 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import fcntl + +import path + +locks_list = [] + +def lock(n, non_block = 0): + f = open(path.lock_dir + '/' + n, "a") + # blah, otherwise it gets garbage collected and doesn't work + locks_list.append(f) + if non_block: + try: + fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB) + except: + f.close() + return None + else: + fcntl.flock(f, fcntl.LOCK_EX) + return f diff --git a/pld-builder.new/PLD_Builder/log.py b/pld-builder.new/PLD_Builder/log.py new file mode 100644 index 0000000..54a6c67 --- /dev/null +++ b/pld-builder.new/PLD_Builder/log.py @@ -0,0 +1,52 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import os +import sys +import time +import syslog + +import path + +builder = "" +do_syslog = 0 + +# string containing last log entry, +# as the code is flawed to get this otherwise +_last_log = "" + +def log(p, s): + global _last_log + _last_log = s + + if do_syslog: + try: + syslog.syslog(p, str(s)) + except TypeError: + syslog.syslog(p, repr(s)) + f = open(path.log_file, "a") + f.write("%s [%s]: %s [%s]\n" % (time.asctime(), builder, s, os.path.basename(sys.argv[0]))) + f.close() + +def panic(s): + log(syslog.LOG_ALERT, "PANIC: %s" % s) + raise Exception, "PANIC: %s" % str(s) + +def alert(s): + log(syslog.LOG_ALERT, "alert: %s" % s) + +def error(s): + log(syslog.LOG_ERR, "error: %s" % s) + +def warn(s): + log(syslog.LOG_WARNING, "warning: %s" % s) + +def notice(s): + log(syslog.LOG_NOTICE, "notice: %s" % s) + +def open_syslog(name, f): + global do_syslog + do_syslog = 1 + syslog.openlog(name, syslog.LOG_PID, f) + +def last_log(): + return _last_log diff --git a/pld-builder.new/PLD_Builder/loop.py b/pld-builder.new/PLD_Builder/loop.py new file mode 100644 index 0000000..bc5be9a --- /dev/null +++ b/pld-builder.new/PLD_Builder/loop.py @@ -0,0 +1,31 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import os +import sys +import time + +import wrap + +def run_loop(fnc, secs = 5, max = 60): + def run(): + pid = os.fork() + if pid == 0: + wrap.wrap(fnc) + sys.exit(0) + else: + pid, s = os.waitpid(pid, 0) + if os.WIFEXITED(s): + s = os.WEXITSTATUS(s) + if s != 0: + sys.exit(s) + else: + sys.exit(10) + + start = time.time() + while time.time() - start < max: + last = time.time() + run() + took = time.time() - last + if took < secs: + time.sleep(secs - took) + diff --git a/pld-builder.new/PLD_Builder/mailer.py b/pld-builder.new/PLD_Builder/mailer.py new file mode 100644 index 0000000..e0b3ae6 --- /dev/null +++ b/pld-builder.new/PLD_Builder/mailer.py @@ -0,0 +1,88 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import time +import os +import sys +import StringIO + +from config 
import config +import util +import log + +def recode(s): + if s.__class__ == ''.__class__: + return s.decode('iso-8859-1', 'replace').encode('us-ascii', 'replace') + else: + return s.encode('us-ascii', 'replace') + +class Message: + def __init__(self): + self.headers = {} + self.body = StringIO.StringIO() + self.set_std_headers() + + def set_header(self, n, v): + self.headers[n] = v + + def set_headers(self, to = None, cc = None, subject = None): + if to != None: + self.set_header("To", to) + if cc != None: + self.set_header("Cc", cc) + if subject != None: + self.set_header("Subject", subject) + + def write_line(self, l): + self.body.write(recode("%s\n" % l)) + + def write(self, s): + self.body.write(recode(s)) + + def append_log(self, log): + s = os.stat(log) + if s.st_size > 50000: + # just head and tail + f = open(log) + line_cnt = 0 + for l in f.xreadlines(): + line_cnt += 1 + f.seek(0) + line = 0 + for l in f.xreadlines(): + if line < 100 or line > line_cnt - 100: + self.body.write(recode(l)) + if line == line_cnt - 100: + self.body.write("\n\n[...]\n\n") + line += 1 + else: + util.sendfile(open(log), self.body) + + def set_std_headers(self): + self.headers["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()) + self.headers["Message-ID"] = "" \ + % (time.time(), os.getpid(), os.uname()[1]) + self.headers["From"] = "PLD %s builder <%s>" \ + % (config.builder, config.email) + self.headers["X-PLD-Builder"] = config.builder + + def write_to(self, f): + for k, v in self.headers.items(): + f.write("%s: %s\n" % (k, v)) + f.write("\n") + self.body.seek(0) + util.sendfile(self.body, f) + + def send(self): + if not os.path.exists("/usr/lib/sendmail"): + # TODO: dump to file? + log.alert("/usr/lib/sendmail doesn't exist: Can't send email") + return False + send_sendmail = "/usr/lib/sendmail -i -t -f %s" % config.admin_email + f = os.popen(send_sendmail, "w") + try: + self.write_to(f) + except IOError, e: + log.alert("sending email message failed: %s" % e) + f.close() + return False + return f.close() diff --git a/pld-builder.new/PLD_Builder/maintainer.py b/pld-builder.new/PLD_Builder/maintainer.py new file mode 100644 index 0000000..0ef6608 --- /dev/null +++ b/pld-builder.new/PLD_Builder/maintainer.py @@ -0,0 +1,80 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import os +import sys +import time +import datetime + +from config import config, init_conf +import util +import chroot +import ftp +import path + +def clean_dir(path, max): + curtime=time.time() + for i in os.listdir(path): + if curtime - os.path.getmtime(path+'/'+i) > max: + if os.path.isdir(path+'/'+i): + util.clean_tmp(path+'/'+i) + else: + os.unlink(path+'/'+i) + +def send_rpmqa(): + tmp = path.build_dir + '/' + util.uuid() + '/' + os.mkdir(tmp) + log = tmp + config.rpmqa_filename + open(log, 'a').write("Query done at: %s\n" % datetime.datetime.now().isoformat(' ')) + chroot.run("rpm -qa|sort", logfile=log) + os.chmod(log,0644) + ftp.init(rpmqa=True) + ftp.add(log) + ftp.flush() + os.unlink(log) + os.rmdir(tmp) + +def handle_src(): + send_rpmqa() + clean_dir(path.www_dir+'/srpms', 2592000) # a month + +def handle_bin(): + send_rpmqa() + f=chroot.popen("""ls -l --time-style +%s /spools/ready""", 'root') + rmpkgs=[] + curtime=time.time() + for i in f: + if i[-4:-1]!='rpm': + continue + tmp=i.split() + mtime=int(tmp[5]) + pkgname=tmp[6] + if curtime - mtime > config.max_keep_time: + rmpkgs.append(pkgname) + + i=0 + while rmpkgs[i:i+1000]: + chroot.run("cd /spools/ready; rm -f %s" % ' '.join(rmpkgs[i:i+1000]), 'root') + 
+        i=i+1000
+    f.close()
+    chroot.run("poldek --mo=nodiff --mkidxz -s /spools/ready")
+
+if __name__ == '__main__':
+    init_conf()
+    bb=config.binary_builders[:]
+    clean_dir(path.spool_dir+'/builds', 2592000) # a month
+    if config.src_builder:
+        try:
+            init_conf(config.src_builder)
+        except:
+            pass
+        else:
+            handle_src()
+        sys.exit(0)
+    for b in bb:
+        try:
+            init_conf(b)
+        except:
+            continue
+        else:
+            handle_bin()
diff --git a/pld-builder.new/PLD_Builder/notify.py b/pld-builder.new/PLD_Builder/notify.py
new file mode 100644
index 0000000..5b0368f
--- /dev/null
+++ b/pld-builder.new/PLD_Builder/notify.py
@@ -0,0 +1,41 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import StringIO
+
+import mailer
+import gpg
+import util
+import notifyq
+from config import config
+
+class Notifier:
+    def __init__(self, g):
+        self.xml = StringIO.StringIO()
+        self.xml.write("<notification group-id='%s' builder='%s'>\n" % \
+                (g.id, config.builder))
+
+    def send(self, r):
+        sio = StringIO.StringIO()
+        self.xml.write("</notification>\n")
+        self.xml.seek(0)
+        sio.write(gpg.sign(self.xml.read()))
+        self.xml = None
+        sio.seek(0)
+        notifyq.init(r)
+        notifyq.add(sio)
+        notifyq.flush()
+
+    def add_batch(self, b, s):
+        self.xml.write("  <batch id='%s' status='%s' />\n" % (b.b_id, s))
+
+n = None
+
+def begin(group):
+    global n
+    n = Notifier(group)
+
+def add_batch(batch, status):
+    n.add_batch(batch, status)
+
+def send(r):
+    n.send(r)
diff --git a/pld-builder.new/PLD_Builder/notifyq.py b/pld-builder.new/PLD_Builder/notifyq.py
new file mode 100644
index 0000000..d7bea90
--- /dev/null
+++ b/pld-builder.new/PLD_Builder/notifyq.py
@@ -0,0 +1,65 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import path
+import os
+import shutil
+import time
+
+from config import config
+import util
+
+class Notify_Queue:
+    def __init__(self):
+        self.queue = None
+        self.some_failed = 0
+
+    def init(self, g=None):
+        self.queue = []
+        self.requester_email = g.requester_email
+        self.notify_url = config.notify_url
+
+    def add(self, file):
+        id = util.uuid()
+        f = open(path.notify_queue_dir + '/' + id, 'w')
+        f.write(file.read())
+        f.close()
+        self.queue.append({'id': id})
+
+    def flush(self):
+        def desc(l):
+            return """Target: %s
+Id: %s
+Builder: %s
+Time: %d
+Requester: %s
+END
+""" % (self.notify_url, l['id'], config.builder, time.time(), self.requester_email)
+
+        for l in self.queue:
+            f = open(path.notify_queue_dir + '/' + l['id'] + ".desc", "w")
+            f.write(desc(l))
+            f.close()
+
+    def kill(self):
+        for l in self.queue:
+            os.unlink(path.notify_queue_dir + '/' + l)
+
+queue = Notify_Queue()
+
+def add(notify):
+    queue.add(notify)
+
+def flush():
+    queue.flush()
+
+def kill():
+    queue.kill()
+
+def init(r):
+    queue.init(r)
+
+def status():
+    return queue.status
+
+def clear_status():
+    queue.status = ""
diff --git a/pld-builder.new/PLD_Builder/path.py b/pld-builder.new/PLD_Builder/path.py
new file mode 100644
index 0000000..1558ab9
--- /dev/null
+++ b/pld-builder.new/PLD_Builder/path.py
@@ -0,0 +1,34 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import os
+
+root_dir = os.getenv('BUILDERPATH')
+if root_dir is None:
+    root_dir = os.path.expanduser('~/pld-builder.new')
+conf_dir = root_dir + "/config"
+spool_dir = root_dir + "/spool"
+lock_dir = root_dir + "/lock"
+www_dir = root_dir + "/www"
+
+acl_conf = conf_dir + "/acl.conf"
+builder_conf = conf_dir + "/builder.conf"
+rsync_password_file = conf_dir + "/rsync-passwords"
+
+# spool/
+queue_file = spool_dir + "/queue"
+req_queue_file = spool_dir + "/req_queue"
+processed_ids_file = spool_dir + "/processed_ids"
+notify_queue_dir = spool_dir + "/notify"
+buildlogs_queue_dir =
spool_dir + "/buildlogs" +ftp_queue_dir = spool_dir + "/ftp" +build_dir = spool_dir + "/builds" +last_req_no_file = spool_dir + "/last_req_no" +got_lock_file = spool_dir + "/got_lock" +log_file = spool_dir + "/log" + +# www/ +srpms_dir = www_dir + "/srpms" +req_queue_signed_file = www_dir + "/queue.gz" +max_req_no_file = www_dir + "/max_req_no" +queue_stats_file = www_dir + "/queue.txt" +queue_html_stats_file = www_dir + "/queue.html" diff --git a/pld-builder.new/PLD_Builder/pipeutil.py b/pld-builder.new/PLD_Builder/pipeutil.py new file mode 100644 index 0000000..4979fea --- /dev/null +++ b/pld-builder.new/PLD_Builder/pipeutil.py @@ -0,0 +1,41 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import select +import os +import StringIO + +def rw_pipe(buf_, infd, outfd): + buf = StringIO.StringIO() + buf.write(buf_.read()) + ret = StringIO.StringIO() + pos = 0 + rd_fin = 0 + wr_fin = 0 + buf.seek(pos) + while not (rd_fin and wr_fin): + if wr_fin: + o = [] + else: + o = [infd] + if rd_fin: + i = [] + else: + i = [outfd] + i, o, e = select.select(i, o, []) + if i != []: + s = os.read(outfd.fileno(), 1000) + if s == "": + rd_fin = 1 + ret.write(s) + if o != []: + buf.seek(pos) + s = buf.read(1000) + if s == "": + infd.close() + wr_fin = 1 + else: + cnt = os.write(infd.fileno(), s) + pos += cnt + outfd.close() + ret.seek(0) + return ret diff --git a/pld-builder.new/PLD_Builder/poldek.py b/pld-builder.new/PLD_Builder/poldek.py new file mode 100644 index 0000000..9b23a89 --- /dev/null +++ b/pld-builder.new/PLD_Builder/poldek.py @@ -0,0 +1,87 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import re +import types +import string +import xreadlines + +from chroot import * +from util import * + + +def get_poldek_requires(): + # precompile regexps + name_rx = re.compile(r"\d+\. 
([^\s]+)-[^-]+-[^-]+\n") + req_rx = re.compile(r" req .* --> (.*)\n") + pkg_name_rx = re.compile(r"([^\s]+)-[^-]+-[^-]+") + + # todo: if a and b are sets, then use sets module + # and intersection method on set object + def intersect(a, b): + r = [] + for x in a: + if x in b: r.append(x) + return r + + # add given req-list to cur_pkg_reqs + def add_req(reqs): + if len(reqs) == 1: + if reqs[0] not in cur_pkg_reqs: + cur_pkg_reqs.append(reqs[0]) + else: + did = 0 + for x in cur_pkg_reqs: + if type(x) is types.ListType: + i = intersect(x, reqs) + if len(i) == 0: + continue + did = 1 + idx = cur_pkg_reqs.index(x) + if len(i) == 1: + if i[0] in cur_pkg_reqs: + del cur_pkg_reqs[idx] + else: + cur_pkg_reqs[idx] = i[0] + else: + cur_pkg_reqs[idx] = i + else: + if x in reqs: + return + if not did: + cur_pkg_reqs.append(reqs) + + pkg_reqs = {} + cur_pkg_reqs = None + cur_pkg = None + + f = chr_popen("poldek -v -v --verify --unique-pkg-names") + for l in xreadlines.xreadlines(f): + m = name_rx.match(l) + if m: + if cur_pkg: + pkg_reqs[cur_pkg] = cur_pkg_reqs + cur_pkg = m.groups(1) + if pkg_reqs.has_key(cur_pkg): + cur_pkg = None + cur_pkg_reqs = None + else: + cur_pkg_reqs = [] + continue + m = req_rx.match(l) + if m: + reqs = [] + for x in string.split(m.group(1)): + if x in ["RPMLIB_CAP", "NOT", "FOUND", "UNMATCHED"]: continue + m = pkg_name_rx.match(x) + if m: + reqs.append(m.group(1)) + else: + msg("poldek_reqs: bad pkg name: %s\n" % x) + if len(reqs) != 0: add_req(reqs) + + f.close() + + if cur_pkg: + pkg_reqs[cur_pkg] = cur_pkg_reqs + + return pkg_reqs diff --git a/pld-builder.new/PLD_Builder/report.py b/pld-builder.new/PLD_Builder/report.py new file mode 100644 index 0000000..a0fe7be --- /dev/null +++ b/pld-builder.new/PLD_Builder/report.py @@ -0,0 +1,139 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import string +import path + +import ftp +import stopwatch +import mailer +from config import config + +def unpackaged_files(b): + msg = "warning: Installed (but unpackaged) file(s) found:\n" + f = open(b.logfile) + copy_mode = 0 + out = [] + for l in f.xreadlines(): + if l == msg: + copy_mode = 1 + out.append(l) + elif copy_mode: + if l[0] != ' ': + copy_mode = 0 + else: + out.append(l) + return out + +def add_pld_builder_info(b): + l = open(b.logfile, "a") + l.write("Begin-PLD-Builder-Info\n") + l.write("Build-Time: %s\n\n" % b.build_time) + st = ftp.status() + if st != "": + l.write("Files queued for ftp:\n%s\n" % st) + ftp.clear_status() + l.writelines(unpackaged_files(b)) + l.write("End-PLD-Builder-Info\n") + +def info_from_log(b, target): + beg = "Begin-PLD-Builder-Info\n" + end = "End-PLD-Builder-Info\n" + f = open(b.logfile) + copy_mode = 0 + need_header = 1 + for l in f.xreadlines(): + if l == beg: + if need_header: + need_header = 0 + target.write("\n--- %s:%s:\n" % (b.spec, b.branch)) + copy_mode = 1 + elif copy_mode: + if l == end: + copy_mode = 0 + else: + target.write(l) + +def send_report(r, is_src = False): + s_failed = ' '.join([b.spec for b in r.batches if b.build_failed]) + s_ok = ' '.join([b.spec for b in r.batches if not b.build_failed]) + upgrades_status = [b.upgraded for b in r.batches] + + if s_failed: s_failed = "ERRORS: %s" % s_failed + if s_ok: s_ok = "OK: %s" % s_ok + + subject = '' + + if 'test-build' in r.flags: + subject = 'TEST build ' + + if not is_src and 'upgrade' in r.flags and False in upgrades_status: + subject = 'upgrade failed ' + + subject += ' '.join((s_failed, s_ok)).strip() + + m = mailer.Message() + m.set_headers(to = r.requester_email, + cc = 
diff --git a/pld-builder.new/PLD_Builder/report.py b/pld-builder.new/PLD_Builder/report.py
new file mode 100644
index 0000000..a0fe7be
--- /dev/null
+++ b/pld-builder.new/PLD_Builder/report.py
@@ -0,0 +1,139 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import string
+import path
+
+import ftp
+import stopwatch
+import mailer
+from config import config
+
+def unpackaged_files(b):
+    msg = "warning: Installed (but unpackaged) file(s) found:\n"
+    f = open(b.logfile)
+    copy_mode = 0
+    out = []
+    for l in f.xreadlines():
+        if l == msg:
+            copy_mode = 1
+            out.append(l)
+        elif copy_mode:
+            if l[0] != ' ':
+                copy_mode = 0
+            else:
+                out.append(l)
+    return out
+
+def add_pld_builder_info(b):
+    l = open(b.logfile, "a")
+    l.write("Begin-PLD-Builder-Info\n")
+    l.write("Build-Time: %s\n\n" % b.build_time)
+    st = ftp.status()
+    if st != "":
+        l.write("Files queued for ftp:\n%s\n" % st)
+    ftp.clear_status()
+    l.writelines(unpackaged_files(b))
+    l.write("End-PLD-Builder-Info\n")
+
+def info_from_log(b, target):
+    beg = "Begin-PLD-Builder-Info\n"
+    end = "End-PLD-Builder-Info\n"
+    f = open(b.logfile)
+    copy_mode = 0
+    need_header = 1
+    for l in f.xreadlines():
+        if l == beg:
+            if need_header:
+                need_header = 0
+                target.write("\n--- %s:%s:\n" % (b.spec, b.branch))
+            copy_mode = 1
+        elif copy_mode:
+            if l == end:
+                copy_mode = 0
+            else:
+                target.write(l)
+
+def send_report(r, is_src = False):
+    s_failed = ' '.join([b.spec for b in r.batches if b.build_failed])
+    s_ok = ' '.join([b.spec for b in r.batches if not b.build_failed])
+    upgrades_status = [b.upgraded for b in r.batches]
+
+    if s_failed: s_failed = "ERRORS: %s" % s_failed
+    if s_ok: s_ok = "OK: %s" % s_ok
+
+    subject = ''
+
+    if 'test-build' in r.flags:
+        subject = 'TEST build '
+
+    if not is_src and 'upgrade' in r.flags and False in upgrades_status:
+        subject = 'upgrade failed '
+
+    subject += ' '.join((s_failed, s_ok)).strip()
+
+    m = mailer.Message()
+    m.set_headers(to = r.requester_email,
+                  cc = config.builder_list,
+                  subject = subject[0:100])
+    if is_src:
+        m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
+    else:
+        m.set_header("References", "<%s@pld.src.builder>" % r.id)
+        m.set_header("In-Reply-To", "<%s@pld.src.builder>" % r.id)
+
+    for b in r.batches:
+        if b.build_failed and b.logfile == None:
+            info = b.skip_reason
+        elif b.build_failed:
+            info = "FAILED"
+        else:
+            info = "OK"
+        m.write("%s (%s): %s\n" % (b.spec, b.branch, info))
+
+    for b in r.batches:
+        if b.logfile != None:
+            info_from_log(b, m)
+
+    for b in r.batches:
+        if (b.is_command () or b.build_failed) and b.logfile != None:
+            m.write("\n\n*** buildlog for %s\n" % b.spec)
+            m.append_log(b.logfile)
+            m.write("\n\n")
+
+    m.send()
+
+def send_cia_report(r, is_src = False):
+
+    subject = 'DeliverXML'
+
+    m = mailer.Message()
+    if (len(config.bot_email) == 0):
+        return
+
+    m.set_headers(to = config.bot_email,
+                  subject = subject)
+    m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
+    m.set_header("X-mailer", "$Id$")
+    m.set_header("X-builder", "PLD")
+
+    # get header of xml message from file
+    f = open(path.root_dir + '/PLD_Builder/cia-head.xml')
+    m.write(f.read())
+    f.close()
+
+    # write in iteration list and status of all processed files
+    for b in r.batches:
+        m.write('\n' % (b.spec, b.branch))
+        if b.build_failed:
+            m.write('\n')
+        else:
+            m.write('\n')
+        m.write('\n')
+
+    # get footer of xml message from file
+    f = open(path.root_dir + '/PLD_Builder/cia-foot.xml')
+    m.write(f.read())
+    f.close()
+
+    # send the e-mail
+    m.send()
diff --git a/pld-builder.new/PLD_Builder/request.py b/pld-builder.new/PLD_Builder/request.py
new file mode 100644
index 0000000..c87d644
--- /dev/null
+++ b/pld-builder.new/PLD_Builder/request.py
@@ -0,0 +1,441 @@
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+from xml.dom.minidom import *
+import string
+import time
+import xml.sax.saxutils
+import fnmatch
+import os
+import urllib
+import cgi
+
+import util
+import log
+from acl import acl
+from config import config
+
+__all__ = ['parse_request', 'parse_requests']
+
+def text(e):
+    res = ""
+    for n in e.childNodes:
+        if n.nodeType != Element.TEXT_NODE:
+            log.panic("xml: text expected in <%s>, got %d" % (e.nodeName, n.nodeType))
+        res += n.nodeValue
+    return res
+
+def attr(e, a, default = None):
+    try:
+        return e.attributes[a].value
+    except:
+        if default != None:
+            return default
+        raise
+
+def escape(s):
+    return xml.sax.saxutils.escape(s)
+
+# return timestamp with timezone information
+# so we could parse it in javascript
+def tzdate(t):
+    # as strftime %z is unofficial, and does not work, need to make it numeric ourselves
+#    date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
+    date = time.strftime("%a %b %d %Y %H:%M:%S", time.localtime(t))
+    # NOTE: the altzone is showing CURRENT timezone, not what the "t" reflects
+    # NOTE: when DST is off timezone gets it right, altzone not
+    if time.daylight:
+        tzoffset = time.altzone
+    else:
+        tzoffset = time.timezone
+    tz = '%+05d' % (-tzoffset / 3600 * 100)
+    return date + ' ' + tz
+
+def is_blank(e):
+    return e.nodeType == Element.TEXT_NODE and string.strip(e.nodeValue) == ""
+
+class Group:
+    def __init__(self, e):
+        self.batches = []
+        self.kind = 'group'
+        self.id = attr(e, "id")
+        self.no = int(attr(e, "no"))
+        self.priority = 2
+        self.time = time.time()
+        self.requester = ""
+        self.max_jobs = 0
+        self.requester_email = ""
+        self.flags = string.split(attr(e, "flags", ""))
+        for c in e.childNodes:
+            if is_blank(c): continue
+
+            if c.nodeType != Element.ELEMENT_NODE:
log.panic("xml: evil group child %d" % c.nodeType) + if c.nodeName == "batch": + self.batches.append(Batch(c)) + elif c.nodeName == "requester": + self.requester = text(c) + self.requester_email = attr(c, "email", "") + elif c.nodeName == "priority": + self.priority = int(text(c)) + elif c.nodeName == "time": + self.time = int(text(c)) + elif c.nodeName == "maxjobs": + self.max_jobs = int(text(c)) + else: + log.panic("xml: evil group child (%s)" % c.nodeName) + # note that we also check that group is sorted WRT deps + m = {} + for b in self.batches: + deps = [] + m[b.b_id] = b + for dep in b.depends_on: + if m.has_key(dep): + # avoid self-deps + if id(m[dep]) != id(b): + deps.append(m[dep]) + else: + log.panic("xml: dependency not found in group") + b.depends_on = deps + if self.requester_email == "" and self.requester != "": + self.requester_email = acl.user(self.requester).mail_to() + + def dump(self, f): + f.write("group: %d (id=%s pri=%d)\n" % (self.no, self.id, self.priority)) + f.write(" from: %s\n" % self.requester) + f.write(" flags: %s\n" % string.join(self.flags)) + f.write(" time: %s\n" % time.asctime(time.localtime(self.time))) + for b in self.batches: + b.dump(f) + f.write("\n") + + def dump_html(self, f): + f.write( + "
\n" + "%(no)d. %(time)s from %(requester)s " + "%(id)s, prio=%(priority)d, jobs=%(max_jobs)d, %(flags)s\n" + % { + 'no': self.no, + 'id': '%(id)s' % {'id': self.id}, + 'time': escape(tzdate(self.time)), + 'requester': escape(self.requester), + 'priority': self.priority, + 'max_jobs': self.max_jobs, + 'flags': string.join(self.flags) + }) + f.write("
    \n") + for b in self.batches: + b.dump_html(f, self.id) + f.write("
\n") + f.write("
\n") + + def write_to(self, f): + f.write(""" + + %s + + %d + %d\n""" % (self.id, self.no, string.join(self.flags), + escape(self.requester_email), escape(self.requester), + self.time, self.priority, self.max_jobs)) + for b in self.batches: + b.write_to(f) + f.write(" \n\n") + + def is_done(self): + ok = 1 + for b in self.batches: + if not b.is_done(): + ok = 0 + return ok + +class Batch: + def __init__(self, e): + self.bconds_with = [] + self.bconds_without = [] + self.builders = [] + self.builders_status = {} + self.builders_status_time = {} + self.builders_status_buildtime = {} + self.kernel = "" + self.target = [] + self.branch = "" + self.src_rpm = "" + self.info = "" + self.spec = "" + self.command = "" + self.command_flags = [] + self.skip = [] + self.gb_id = "" + self.b_id = attr(e, "id") + self.depends_on = string.split(attr(e, "depends-on")) + self.upgraded = True + for c in e.childNodes: + if is_blank(c): continue + + if c.nodeType != Element.ELEMENT_NODE: + log.panic("xml: evil batch child %d" % c.nodeType) + if c.nodeName == "src-rpm": + self.src_rpm = text(c) + elif c.nodeName == "spec": + # normalize specname, specname is used as buildlog and we don't + # want to be exposed to directory traversal attacks + self.spec = text(c).split('/')[-1] + elif c.nodeName == "command": + self.spec = "COMMAND" + self.command = text(c).strip() + self.command_flags = string.split(attr(c, "flags", "")) + elif c.nodeName == "info": + self.info = text(c) + elif c.nodeName == "kernel": + self.kernel = text(c) + elif c.nodeName == "target": + self.target.append(text(c)) + elif c.nodeName == "skip": + self.skip.append(text(c)) + elif c.nodeName == "branch": + self.branch = text(c) + elif c.nodeName == "builder": + key = text(c) + self.builders.append(key) + self.builders_status[key] = attr(c, "status", "?") + self.builders_status_time[key] = attr(c, "time", "0") + self.builders_status_buildtime[key] = "0" #attr(c, "buildtime", "0") + elif c.nodeName == "with": + self.bconds_with.append(text(c)) + elif c.nodeName == "without": + self.bconds_without.append(text(c)) + else: + log.panic("xml: evil batch child (%s)" % c.nodeName) + + def is_done(self): + ok = 1 + for b in self.builders: + s = self.builders_status[b] + if not s.startswith("OK") and not s.startswith("SKIP") and not s.startswith("UNSUPP") and not s.startswith("FAIL"): + ok = 0 + return ok + + def dump(self, f): + f.write(" batch: %s/%s\n" % (self.src_rpm, self.spec)) + f.write(" info: %s\n" % self.info) + f.write(" kernel: %s\n" % self.kernel) + f.write(" target: %s\n" % self.target_string()) + f.write(" branch: %s\n" % self.branch) + f.write(" bconds: %s\n" % self.bconds_string()) + builders = [] + for b in self.builders: + builders.append("%s:%s" % (b, self.builders_status[b])) + f.write(" builders: %s\n" % string.join(builders)) + + def is_command(self): + return self.command != "" + + def dump_html(self, f, rid): + f.write("
  • \n") + if self.is_command(): + desc = "SH:
    %s
    flags: [%s]" % (self.command, ' '.join(self.command_flags)) + else: + package_url = "http://cvs.pld-linux.org/packages/%(package)s/%(spec)s?only_with_tag=%(branch)s" % { + 'spec': self.spec, + 'branch': self.branch, + 'package': self.spec[:-5], + } + desc = "%(src_rpm)s (%(spec)s -r %(branch)s%(bconds)s)" % { + 'src_rpm': self.src_rpm, + 'spec': self.spec, + 'branch': self.branch, + 'bconds': self.bconds_string() + self.kernel_string() + self.target_string(), + 'package_url': package_url, + } + f.write("%s [" % desc) + builders = [] + for b in self.builders: + s = self.builders_status[b] + if s.startswith("OK"): + c = "green" + elif s.startswith("FAIL"): + c = "red" + elif s.startswith("SKIP"): + c = "blue" + elif s.startswith("UNSUPP"): + c = "fuchsia" + else: + c = "black" + link_pre = "" + link_post = "" + if (s.startswith("OK") or s.startswith("SKIP") or s.startswith("UNSUPP") or s.startswith("FAIL")) and len(self.spec) > 5: + if self.is_command(): + bl_name = "command" + else: + bl_name = self.spec[:len(self.spec)-5] + lin_ar = b.replace('noauto-','') + path = "/%s/%s/%s,%s.bz2" % (lin_ar.replace('-','/'), s, bl_name, rid) + is_ok = 0 + if s.startswith("OK"): + is_ok = 1 + bld = lin_ar.split('-') + tree_name = '-'.join(bld[:-1]) + tree_arch = '-'.join(bld[-1:]) + link_pre = "" \ + % (urllib.quote(tree_name), urllib.quote(tree_arch), is_ok, urllib.quote(bl_name), urllib.quote(rid)) + link_post = "" + + def ftime(s): + t = float(s) + if t > 0: + return time.asctime(time.localtime(t)) + else: + return 'N/A' + + tooltip = "last update: %(time)s\nbuild time: %(buildtime)s" % { + 'time' : ftime(self.builders_status_time[b]), + 'buildtime' : ftime(self.builders_status_buildtime[b]), + } + builders.append(link_pre + + "%(builder)s:%(status)s" % { + 'color' : c, + 'builder' : b, + 'status' : s, + 'tooltip' : cgi.escape(tooltip, True), + } + + link_post) + f.write("%s]
  • \n" % string.join(builders)) + + def rpmbuild_opts(self): + """ + return all rpmbuild options related to this build + """ + bconds = self.bconds_string() + self.kernel_string() + self.target_string() + rpmdefs = \ + "--define '_topdir %(echo $HOME/rpm)' " \ + "--define '_specdir %{_topdir}/packages/%{name}' " \ + "--define '_sourcedir %{_specdir}' " \ + "--define '_builddir %{_topdir}/BUILD/%{name}' " + return rpmdefs + bconds + + def kernel_string(self): + r = "" + if self.kernel != "": + r = " --define 'alt_kernel " + self.kernel + "'" + return r + + def target_string(self): + if len(self.target) > 0: + return " --target " + ",".join(self.target) + else: + return "" + + def bconds_string(self): + r = "" + for b in self.bconds_with: + r = r + " --with " + b + for b in self.bconds_without: + r = r + " --without " + b + return r + + def default_target(self, arch): + self.target.append("%s-pld-linux" % arch) + + def write_to(self, f): + f.write(""" + + %s + %s + %s + %s + %s\n""" % (self.b_id, + string.join(map(lambda (b): b.b_id, self.depends_on)), + escape(self.src_rpm), + escape(' '.join(self.command_flags)), escape(self.command), + escape(self.spec), escape(self.branch), escape(self.info))) + if self.kernel != "": + f.write(" %s\n" % escape(self.kernel)) + for b in self.bconds_with: + f.write(" %s\n" % escape(b)) + for b in self.target: + f.write(" %s\n" % escape(b)) + for b in self.bconds_without: + f.write(" %s\n" % escape(b)) + for b in self.builders: + if self.builders_status_buildtime.has_key(b): + t = self.builders_status_buildtime[b] + else: + t = "0" + f.write(" %s\n" % \ + (escape(self.builders_status[b]), self.builders_status_time[b], t, escape(b))) + f.write(" \n") + + def log_line(self, l): + log.notice(l) + if self.logfile != None: + util.append_to(self.logfile, l) + + def expand_builders(batch, all_builders): + all = [] + for bld in batch.builders: + res = [] + for my_bld in all_builders: + if fnmatch.fnmatch(my_bld, bld): + res.append(my_bld) + if res != []: + all.extend(res) + else: + all.append(bld) + batch.builders = all + +class Notification: + def __init__(self, e): + self.batches = [] + self.kind = 'notification' + self.group_id = attr(e, "group-id") + self.builder = attr(e, "builder") + self.batches = {} + self.batches_buildtime = {} + for c in e.childNodes: + if is_blank(c): continue + if c.nodeType != Element.ELEMENT_NODE: + log.panic("xml: evil notification child %d" % c.nodeType) + if c.nodeName == "batch": + id = attr(c, "id") + status = attr(c, "status") + buildtime = attr(c, "buildtime", "0") + if not status.startswith("OK") and not status.startswith("SKIP") and not status.startswith("UNSUPP") and not status.startswith("FAIL"): + log.panic("xml notification: bad status: %s" % status) + self.batches[id] = status + self.batches_buildtime[id] = buildtime + else: + log.panic("xml: evil notification child (%s)" % c.nodeName) + + def apply_to(self, q): + for r in q.requests: + if r.kind == "group": + for b in r.batches: + if self.batches.has_key(b.b_id): + b.builders_status[self.builder] = self.batches[b.b_id] + b.builders_status_time[self.builder] = time.time() + b.builders_status_buildtime[self.builder] = "0" #self.batches_buildtime[b.b_id] + +def build_request(e): + if e.nodeType != Element.ELEMENT_NODE: + log.panic("xml: evil request element") + if e.nodeName == "group": + return Group(e) + elif e.nodeName == "notification": + return Notification(e) + elif e.nodeName == "command": + # FIXME + return Command(e) + else: + log.panic("xml: evil request [%s]" % 
e.nodeName) + +def parse_request(f): + d = parseString(f) + return build_request(d.documentElement) + +def parse_requests(f): + d = parseString(f) + res = [] + for r in d.documentElement.childNodes: + if is_blank(r): continue + res.append(build_request(r)) + return res diff --git a/pld-builder.new/PLD_Builder/request_fetcher.py b/pld-builder.new/PLD_Builder/request_fetcher.py new file mode 100644 index 0000000..a677a71 --- /dev/null +++ b/pld-builder.new/PLD_Builder/request_fetcher.py @@ -0,0 +1,135 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import string +import signal +import os +import urllib +import urllib2 +import StringIO +import sys +import gzip + +import path +import log +import status +import lock +import util +import gpg +import request +import loop +import socket +from acl import acl +from bqueue import B_Queue +from config import config, init_conf + +last_count = 0 + +def alarmalarm(signum, frame): + raise IOError, 'TCP connection hung' + +def has_new(control_url): + global last_count + cnt_f = open(path.last_req_no_file) + try: + last_count = int(string.strip(cnt_f.readline())) + except ValueError, e: + last_count = 0 + + cnt_f.close() + f = None + socket.setdefaulttimeout(240) + signal.signal(signal.SIGALRM, alarmalarm) + signal.alarm(300) + try: + headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' } + req = urllib2.Request(url=control_url + "/max_req_no", headers=headers) + f = urllib2.urlopen(req) + count = int(string.strip(f.readline())) + signal.alarm(0) + except Exception, e: + signal.alarm(0) + log.error("can't fetch %s: %s" % (control_url + "/max_req_no", e)) + sys.exit(1) + res = 0 + if count != last_count: + res = 1 + f.close() + return res + +def fetch_queue(control_url): + signal.signal(signal.SIGALRM, alarmalarm) + socket.setdefaulttimeout(240) + signal.alarm(300) + try: + headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' } + req = urllib2.Request(url=control_url + "/queue.gz", headers=headers) + f = urllib2.urlopen(req) + signal.alarm(0) + except Exception, e: + signal.alarm(0) + log.error("can't fetch %s: %s" % (control_url + "/queue.gz", e)) + sys.exit(1) + sio = StringIO.StringIO() + util.sendfile(f, sio) + f.close() + sio.seek(0) + f = gzip.GzipFile(fileobj = sio) + (signers, body) = gpg.verify_sig(f.read()) + u = acl.user_by_email(signers) + if u == None: + log.alert("queue.gz not signed with signature of valid user: %s" % signers) + sys.exit(1) + if not u.can_do("sign_queue", "all"): + log.alert("user %s is not allowed to sign my queue" % u.login) + sys.exit(1) + return request.parse_requests(body) + +def handle_reqs(builder, reqs): + qpath = path.queue_file + "-" + builder + if not os.access(qpath, os.F_OK): + util.append_to(qpath, "\n") + q = B_Queue(qpath) + q.lock(0) + q.read() + for r in reqs: + if r.kind != 'group': + raise Exception, 'handle_reqs: fatal: huh? 
%s' % r.kind + need_it = 0 + for b in r.batches: + if builder in b.builders: + need_it = 1 + if need_it: + log.notice("queued %s (%d) for %s" % (r.id, r.no, builder)) + q.add(r) + q.write() + q.unlock() + +def main(): + lck = lock.lock("request_fetcher", non_block = True) + if lck == None: + sys.exit(1) + init_conf() + acl.try_reload() + + status.push("fetching requests") + if has_new(config.control_url): + q = fetch_queue(config.control_url) + max_no = 0 + q_new = [] + for r in q: + if r.no > max_no: + max_no = r.no + if r.no > last_count: + q_new.append(r) + for b in config.binary_builders: + handle_reqs(b, q_new) + f = open(path.last_req_no_file, "w") + f.write("%d\n" % max_no) + f.close() + status.pop() + lck.close() + +if __name__ == '__main__': + # http connection is established (and few bytes transferred through it) + # each $secs seconds. + loop.run_loop(main, secs = 10) diff --git a/pld-builder.new/PLD_Builder/request_handler.py b/pld-builder.new/PLD_Builder/request_handler.py new file mode 100644 index 0000000..2c38e83 --- /dev/null +++ b/pld-builder.new/PLD_Builder/request_handler.py @@ -0,0 +1,203 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import email +import string +import time +import os +import StringIO +import sys +import fnmatch + +import gpg +import request +import log +import path +import util +import wrap +import status +from acl import acl +from lock import lock +from bqueue import B_Queue +from config import config, init_conf + +def check_double_id(id): + id_nl = id + "\n" + + ids = open(path.processed_ids_file) + for i in ids.xreadlines(): + if i == id_nl: + # FIXME: security email here? + log.alert("request %s already processed" % id) + return 1 + ids.close() + + ids = open(path.processed_ids_file, "a") + ids.write(id_nl) + ids.close() + + return 0 + +def handle_group(r, user): + lockf = None + def fail_mail(msg): + if len(r.batches) >= 1: + spec = r.batches[0].spec + else: + spec = "None.spec" + log.error("%s: %s" % (spec, msg)) + m = user.message_to() + m.set_headers(subject = "building %s failed" % spec) + m.write_line(msg) + m.send() + + lockf = lock("request") + if check_double_id(r.id): + lockf.close() + return + + for batch in r.batches: + + if not user.can_do("src", config.builder, batch.branch): + fail_mail("user %s is not allowed to src:%s:%s" \ + % (user.get_login(), config.builder, batch.branch)) + lockf.close() + return + + if 'test-build' in r.flags and 'upgrade' in r.flags: + fail_mail("it's forbidden to upgrade from a test build") + lockf.close() + return + + if "upgrade" in r.flags and not user.can_do("upgrade", config.builder, batch.branch): + fail_mail("user %s is not allowed to upgrade:%s:%s" \ + % (user.get_login(), config.builder, batch.branch)) + lockf.close() + return + + # src builder handles only special commands + if batch.is_command() and (batch.command in ["git pull"] or batch.command[:5] == "skip:" or config.builder in batch.builders): + batch.expand_builders(config.binary_builders + [config.src_builder]) + else: + batch.expand_builders(config.binary_builders) + + if not batch.is_command() and config.builder in batch.builders: + batch.builders.remove(config.builder) + + for bld in batch.builders: + batch.builders_status[bld] = '?' 
+ batch.builders_status_time[bld] = time.time() + if bld not in config.binary_builders and bld != config.builder: + fail_mail("I (src rpm builder '%s') do not handle binary builder '%s', only '%s'" % \ + (config.builder, bld, string.join(config.binary_builders))) + lockf.close() + return + if batch.is_command(): + if "no-chroot" in batch.command_flags: + if not user.can_do("command-no-chroot", bld): + fail_mail("user %s is not allowed to command-no-chroot:%s" \ + % (user.get_login(), bld)) + lockf.close() + return + if not user.can_do("command", bld): + fail_mail("user %s is not allowed to command:%s" \ + % (user.get_login(), bld)) + lockf.close() + return + elif not user.can_do("binary", bld, batch.branch): + pkg = batch.spec + if pkg.endswith(".spec"): + pkg = pkg[:-5] + if not user.can_do("binary-" + pkg, bld, batch.branch): + fail_mail("user %s is not allowed to binary-%s:%s:%s" \ + % (user.get_login(), pkg, bld, batch.branch)) + lockf.close() + return + + r.priority = user.check_priority(r.priority,config.builder) + r.requester = user.get_login() + r.requester_email = user.mail_to() + r.time = time.time() + log.notice("queued %s from %s" % (r.id, user.get_login())) + q = B_Queue(path.queue_file) + q.lock(0) + q.read() + q.add(r) + q.write() + q.unlock() + lockf.close() + +def handle_notification(r, user): + if not user.can_do("notify", r.builder): + log.alert("user %s is not allowed to notify:%s" % (user.login, r.builder)) + q = B_Queue(path.req_queue_file) + q.lock(0) + q.read() + not_fin = filter(lambda (r): not r.is_done(), q.requests) + r.apply_to(q) + for r in not_fin: + if r.is_done(): + util.clean_tmp(path.srpms_dir + '/' + r.id) + now = time.time() + def leave_it(r): + # for ,,done'' set timeout to 4d + if r.is_done() and r.time + 4 * 24 * 60 * 60 < now: + return False + # and for not ,,done'' set it to 20d + if r.time + 20 * 24 * 60 * 60 < now: + util.clean_tmp(path.srpms_dir + '/' + r.id) + return False + return True + q.requests = filter(leave_it, q.requests) + q.write() + q.dump(path.queue_stats_file) + q.dump_html(path.queue_html_stats_file) + q.write_signed(path.req_queue_signed_file) + q.unlock() + +def handle_request(req, filename = None): + if req == '': + log.alert('Empty body received. Filename: %s' % filename) + return False + + keys = gpg.get_keys(req) + (em, body) = gpg.verify_sig(req) + if not em: + log.alert("Invalid signature, missing/untrusted key. Keys in gpg batch: '%s'" % keys) + return False + user = acl.user_by_email(em) + if user == None: + # FIXME: security email here + log.alert("'%s' not in acl. 
Keys in gpg batch: '%s'" % (em, keys)) + return False + + acl.set_current_user(user) + status.push("request from %s" % user.login) + r = request.parse_request(body) + if r.kind == 'group': + handle_group(r, user) + elif r.kind == 'notification': + handle_notification(r, user) + else: + msg = "%s: don't know how to handle requests of this kind '%s'" \ + % (user.get_login(), r.kind) + log.alert(msg) + m = user.message_to() + m.set_headers(subject = "unknown request") + m.write_line(msg) + m.send() + status.pop() + return True + +def handle_request_main(req, filename = None): + acl.try_reload() + init_conf("src") + status.push("handling email request") + ret = handle_request(req, filename = filename) + status.pop() + return ret + +def main(): + sys.exit(not handle_request_main(sys.stdin.read())) + +if __name__ == '__main__': + wrap.wrap(main) diff --git a/pld-builder.new/PLD_Builder/request_handler_server b/pld-builder.new/PLD_Builder/request_handler_server new file mode 100755 index 0000000..4abb47d --- /dev/null +++ b/pld-builder.new/PLD_Builder/request_handler_server @@ -0,0 +1,39 @@ +#!/bin/sh +self=$0 +if [ -L "$self" ]; then + self=$(readlink -f "$0") +fi +sock=$(dirname "$self")/request_handler_server.sock +daemon=0 +attach=0 + +if [ x"$1" = x"--daemon" -o x"$1" = x"-d" ]; then + daemon=1 +fi +if [ x"$1" = x"--attach" -o x"$1" = x"-a" ]; then + attach=1 +fi + +if [ ! -S $sock ]; then + daemon=1 +fi + +# if none of the modes attach to it +if [ $daemon = 0 -a $attach = 0 ]; then + attach=1 +fi + +if [ "$daemon" = 1 ]; then + echo "Starting request_handler_server" + rm -f $sock + cd $(dirname $sock) + exec dtach -n $(basename $sock) -r none python request_handler_server.py + exit 1 +fi + +# attach to session +if [ "$attach" = 1 ]; then + echo "Attaching to request_handler_server" + exec dtach -a $sock + exit 1 +fi diff --git a/pld-builder.new/PLD_Builder/request_handler_server.py b/pld-builder.new/PLD_Builder/request_handler_server.py new file mode 100644 index 0000000..5c591f0 --- /dev/null +++ b/pld-builder.new/PLD_Builder/request_handler_server.py @@ -0,0 +1,192 @@ +#!/usr/bin/python + +import socket +import string +import cgi +import time +import log +import sys +import traceback +import os +from config import config, init_conf + +from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer + +import request_handler +import path + +class MyHandler(BaseHTTPRequestHandler): + + def do_GET(self): + self.send_error(401); + + def do_POST(self): + global rootnode + try: + length = int(self.headers.getheader('content-length')) + ctype, pdict = cgi.parse_header(self.headers.getheader('content-type')) + if ctype != 'application/x-www-form-urlencoded': + log.error("request_handler_server: [%s]: 401 Unauthorized" % self.client_address[0]) + self.send_error(401) + self.end_headers() + return + + query = self.rfile.read(length) + + filename = self.headers.getheader('x-filename') + + if not request_handler.handle_request_main(query, filename = filename): + error = log.last_log(); + log.error("request_handler_server: [%s]: handle_request_main(..., %s) failed" % (self.client_address[0], filename)) + self.send_error(500, "%s: request failed. 
%s" % (filename, error)) + self.end_headers() + return + + self.send_response(200) + self.end_headers() + + except Exception, e: + self.send_error(500, "%s: %s" % (filename, e)) + self.end_headers() + log.error("request_handler_server: [%s]: exception: %s\n%s" % (self.client_address[0], e, traceback.format_exc())) + raise + pass + +def write_css(): + css_file = path.www_dir + "/style.css" + # skip if file exists and code is not newer + if os.path.exists(css_file) and os.stat(__file__).st_mtime < os.stat(css_file).st_mtime: + return + + # css from www.pld-linux.org wiki theme, using css usage firebug plugin to cleanup + css = """ +html { + background-color: white; + color: #5e5e5e; + font-family: Tahoma, Arial, Lucida Grande, sans-serif; + font-size: 0.75em; + line-height: 1.25em; +} + +a { + text-decoration: underline; + color: #006; +} + +a:hover { + color: #006; +} + +pre { + background: #FFF8EB; + border: 1pt solid #FFE2AB; + font-family: courier, monospace; + padding: 0.5em; + white-space: pre-wrap; + word-wrap: break-word; +} + +@media screen, projection { + html { + background-color: #f3efe3; + } + + body { + position: relative; + } + + div { + background-color: white; + margin: 10px 0px; + padding: 2px; + } + div > a { + font-weight: bold; + color: #5e5e5e; + } + div > a:hover { + color: #5e5e5e; + } + div.upgrade { + background-color: #e4f1cf; + } + div:target { + background-color: #ffffcc; + color: black; + } +} +@media print { + a { + background-color: inherit; + color: inherit; + } +} + +@media projection { + html { line-height: 1.8em; } + body, b, a, p { font-size: 22pt; } +} +""" + old_umask = os.umask(0022) + f = open(css_file, "w") + f.write(css) + f.close() + os.umask(old_umask) + +def write_js(): + js_file = path.www_dir + "/script.js" + # skip if file exists and code is not newer + if os.path.exists(js_file) and os.stat(__file__).st_mtime < os.stat(js_file).st_mtime: + return + + js = """ +// update date stamps to reflect viewers timezone +function update_tz(t) { + var el, off, dt, + collection = document.getElementsByTagName('span'); + for (off in collection) { + el = collection[off]; + if (el.id == 'tz') { + dt = new Date(el.innerHTML).toString(); + // strip timezone name, it is usually wrong when not initialized + // from TZ env, but reverse calculated from os data + dt = dt.replace(/\s+\(.+\)/, ""); + // strip "GMT" + dt = dt.replace(/GMT/, ""); + el.innerHTML = dt; + } + } +} +window.onload = update_tz; +""" + old_umask = os.umask(0022) + f = open(js_file, "w") + f.write(js) + f.close() + os.umask(old_umask) + +def main(): + write_css(); + write_js(); + socket.setdefaulttimeout(30) + try: + init_conf() + host = "" + port = config.request_handler_server_port + + try: + server = HTTPServer((host, port), MyHandler) + except Exception, e: + log.notice("request_handler_server: can't start server on [%s:%d]: %s" % (host, port, e)) + print >> sys.stderr, "ERROR: Can't start server on [%s:%d]: %s" % (host, port, e) + sys.exit(1) + + log.notice('request_handler_server: started on [%s:%d]...' 
% (host, port)) + server.serve_forever() + except KeyboardInterrupt: + log.notice('request_handler_server: ^C received, shutting down server') + server.socket.close() + +if __name__ == '__main__': + main() + diff --git a/pld-builder.new/PLD_Builder/rpm_builder.py b/pld-builder.new/PLD_Builder/rpm_builder.py new file mode 100644 index 0000000..e42c085 --- /dev/null +++ b/pld-builder.new/PLD_Builder/rpm_builder.py @@ -0,0 +1,400 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import sys +import os +import atexit +import time +import datetime +import string +import urllib +import urllib2 + +from config import config, init_conf +from bqueue import B_Queue +import lock +import util +import loop +import path +import status +import log +import chroot +import ftp +import buildlogs +import notify +import build +import report +import install + +# *HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK* +import socket + +socket.myorigsocket=socket.socket + +def mysocket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): + s=socket.myorigsocket(family, type, proto) + s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + return s + +socket.socket=mysocket +# *HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK* + +# this code is duplicated in srpm_builder, but we +# might want to handle some cases differently here +def pick_request(q): + def mycmp(r1, r2): + if r1.kind != 'group' or r2.kind != 'group': + raise Exception, "non-group requests" + pri_diff = cmp(r1.priority, r2.priority) + if pri_diff == 0: + return cmp(r1.time, r2.time) + else: + return pri_diff + q.requests.sort(mycmp) + ret = q.requests[0] + return ret + +def check_skip_build(r, b): + src_url = config.control_url + "/srpms/" + r.id + "/skipme" + good = False + b.log_line("checking if we should skip the build") + while not good: + try: + headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' } + req = urllib2.Request(url=src_url, headers=headers) + f = urllib2.urlopen(req) + good = True + except urllib2.HTTPError, error: + return False + except urllib2.URLError, error: + # see errno.h + try: + errno = error.errno + except AttributeError: + # python 2.4 + errno = error.reason[0] + + if errno in [-3, 60, 61, 110, 111]: + b.log_line("unable to connect... trying again") + continue + else: + return False + f.close() + return True + return False + +def fetch_src(r, b): + src_url = config.control_url + "/srpms/" + r.id + "/" + urllib.quote(b.src_rpm) + b.log_line("fetching %s" % src_url) + start = time.time() + good = False + while not good: + try: + headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' } + req = urllib2.Request(url=src_url, headers=headers) + f = urllib2.urlopen(req) + good = True + except urllib2.HTTPError, error: + # fail in a way where cron job will retry + msg = "unable to fetch url %s, http code: %d" % (src_url, error.code) + b.log_line(msg) + queue_time = time.time() - r.time + # 6 hours + if error.code != 404 or (queue_time >= 0 and queue_time < (6 * 60 * 60)): + raise IOError, msg + else: + msg = "in queue for more than 6 hours, download failing" + b.log_line(msg) + return False + except urllib2.URLError, error: + # see errno.h + try: + errno = error.errno + except AttributeError: + # python 2.4 + errno = error.reason[0] + + if errno in [-3, 60, 61, 110, 111]: + b.log_line("unable to connect to %s... 
trying again" % (src_url)) + continue + else: + raise + + o = chroot.popen("cat > %s" % b.src_rpm, mode = "w") + + try: + bytes = util.sendfile(f, o) + except IOError, e: + b.log_line("error: unable to write to `%s': %s" % (b.src_rpm, e)) + raise + + f.close() + o.close() + t = time.time() - start + if t == 0: + b.log_line("fetched %d bytes" % bytes) + else: + b.log_line("fetched %d bytes, %.1f K/s" % (bytes, bytes / 1024.0 / t)) + +def prepare_env(): + chroot.run(""" + test ! -f /proc/uptime && mount /proc 2>/dev/null + test ! -c /dev/full && rm -f /dev/full && mknod -m 666 /dev/full c 1 7 + test ! -c /dev/null && rm -f /dev/null && mknod -m 666 /dev/null c 1 3 + test ! -c /dev/random && rm -f /dev/random && mknod -m 644 /dev/random c 1 8 + test ! -c /dev/urandom && rm -f /dev/urandom && mknod -m 644 /dev/urandom c 1 9 + test ! -c /dev/zero && rm -f /dev/zero && mknod -m 666 /dev/zero c 1 5 + + # need entry for "/" in mtab, for diskspace() to work in rpm + [ -z $(awk '$2 == "/" {print $1}' /etc/mtab) ] && mount -f -t rootfs rootfs / + + # make neccessary files readable for builder user + # TODO: see if they really aren't readable for builder + for db in Packages Name Basenames Providename Pubkeys; do + db=/var/lib/rpm/$db + test -f $db && chmod a+r $db + done + + # try to limit network access for builder account + /bin/setfacl -m u:builder:--- /etc/resolv.conf + """, 'root') + +def build_rpm(r, b): + if len(b.spec) <= 5: + # should not really get here + b.log_line("error: No .spec not given of malformed: '%s'" % b.spec) + res = "FAIL_INTERNAL" + return res + + packagename = b.spec[:-5] + status.push("building %s (%s)" % (b.spec, packagename)) + b.log_line("request from: %s" % r.requester) + + if check_skip_build(r, b): + b.log_line("build skipped due to src builder request") + res = "SKIP_REQUESTED" + return res + + b.log_line("started at: %s" % time.asctime()) + fetch_src(r, b) + b.log_line("installing srpm: %s" % b.src_rpm) + res = chroot.run(""" + # b.id %(bid)s + set -ex; + install -d rpm/packages/%(package)s rpm/BUILD/%(package)s; + rpm -Uhv %(rpmdefs)s %(src_rpm)s; + rm -f %(src_rpm)s; + """ % { + 'bid' : b.b_id, + 'package' : packagename, + 'rpmdefs' : b.rpmbuild_opts(), + 'src_rpm' : b.src_rpm + }, logfile = b.logfile) + b.files = [] + + # it's better to have TMPDIR and BUILD dir on same partition: + # + /usr/bin/bzip2 -dc /home/services/builder/rpm/packages/kernel/patch-2.6.27.61.bz2 + # patch: **** Can't rename file /tmp/B.a1b1d3/poKWwRlp to drivers/scsi/hosts.c : No such file or directory + tmpdir = os.environ.get('HOME') + "/rpm/BUILD/%s/tmp" % packagename + if res: + b.log_line("error: installing src rpm failed") + res = "FAIL_SRPM_INSTALL" + else: + prepare_env() + chroot.run("install -m 700 -d %s" % tmpdir) + + b.default_target(config.arch) + # check for build arch before filling BR + cmd = "set -ex; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \ + "rpmbuild -bp --short-circuit --nodeps %(rpmdefs)s --define 'prep exit 0' rpm/packages/%(package)s/%(spec)s" % { + 'tmpdir': tmpdir, + 'nice' : config.nice, + 'rpmdefs' : b.rpmbuild_opts(), + 'package' : packagename, + 'spec': b.spec, + } + res = chroot.run(cmd, logfile = b.logfile) + if res: + res = "UNSUPP" + b.log_line("error: build arch check (%s) failed" % cmd) + + if not res: + if ("no-install-br" not in r.flags) and not install.uninstall_self_conflict(b): + res = "FAIL_DEPS_UNINSTALL" + if ("no-install-br" not in r.flags) and not install.install_br(r, b): + res = "FAIL_DEPS_INSTALL" + if not res: + max_jobs = 
max(min(int(os.sysconf('SC_NPROCESSORS_ONLN') + 1), config.max_jobs), 1) + if r.max_jobs > 0: + max_jobs = max(min(config.max_jobs, r.max_jobs), 1) + cmd = "set -ex; : build-id: %(r_id)s; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \ + "rpmbuild -bb --define '_smp_mflags -j%(max_jobs)d' %(rpmdefs)s rpm/packages/%(package)s/%(spec)s" % { + 'r_id' : r.id, + 'tmpdir': tmpdir, + 'nice' : config.nice, + 'rpmdefs' : b.rpmbuild_opts(), + 'package' : packagename, + 'max_jobs' : max_jobs, + 'spec': b.spec, + } + b.log_line("building RPM using: %s" % cmd) + begin_time = time.time() + res = chroot.run(cmd, logfile = b.logfile) + end_time = time.time() + b.log_line("ended at: %s, done in %s" % (time.asctime(), datetime.timedelta(0, end_time - begin_time))) + if res: + res = "FAIL" + files = util.collect_files(b.logfile) + if len(files) > 0: + r.chroot_files.extend(files) + else: + b.log_line("error: No files produced.") + last_section = util.find_last_section(b.logfile) + if last_section == None: + res = "FAIL" + else: + res = "FAIL_%s" % last_section.upper() + b.files = files + + chroot.run(""" + set -ex; + rpmbuild %(rpmdefs)s --nodeps --nobuild --clean --rmspec --rmsource rpm/packages/%(package)s/%(spec)s + rm -rf %(tmpdir)s; + chmod -R u+rwX rpm/BUILD/%(package)s; + rm -rf rpm/BUILD/%(package)s; + """ % + {'tmpdir' : tmpdir, 'spec': b.spec, 'package' : packagename, 'rpmdefs' : b.rpmbuild_opts()}, logfile = b.logfile) + + def ll(l): + util.append_to(b.logfile, l) + + if b.files != []: + rpm_cache_dir = config.rpm_cache_dir + if "test-build" not in r.flags: + # NOTE: copying to cache dir doesn't mean that build failed, so ignore result + b.log_line("copy rpm files to cache_dir: %s" % rpm_cache_dir) + chroot.run( + "cp -f %s %s && poldek --mo=nodiff --mkidxz -s %s/" % \ + (string.join(b.files), rpm_cache_dir, rpm_cache_dir), + logfile = b.logfile, user = "root" + ) + else: + ll("test-build: not copying to " + rpm_cache_dir) + ll("Begin-PLD-Builder-Info") + if "upgrade" in r.flags: + b.upgraded = install.upgrade_from_batch(r, b) + else: + ll("not upgrading") + ll("End-PLD-Builder-Info") + + for f in b.files: + local = r.tmp_dir + os.path.basename(f) + chroot.cp(f, outfile = local, rm = True) + ftp.add(local) + + def uploadinfo(b): + c="file:SRPMS:%s\n" % b.src_rpm + for f in b.files: + c=c + "file:ARCH:%s\n" % os.path.basename(f) + c=c + "END\n" + return c + + if config.gen_upinfo and b.files != [] and 'test-build' not in r.flags: + fname = r.tmp_dir + b.src_rpm + ".uploadinfo" + f = open(fname, "w") + f.write(uploadinfo(b)) + f.close() + ftp.add(fname, "uploadinfo") + + status.pop() + + return res + +def handle_request(r): + ftp.init(r) + buildlogs.init(r) + build.build_all(r, build_rpm) + report.send_report(r, is_src = False) + ftp.flush() + notify.send(r) + +def check_load(): + do_exit = 0 + try: + f = open("/proc/loadavg") + if float(string.split(f.readline())[2]) > config.max_load: + do_exit = 1 + except: + pass + if do_exit: + sys.exit(0) + +def main_for(builder): + msg = "" + + init_conf(builder) + + q = B_Queue(path.queue_file + "-" + config.builder) + q.lock(0) + q.read() + if q.requests == []: + q.unlock() + return + req = pick_request(q) + q.unlock() + + # high priority tasks have priority < 0, normal tasks >= 0 + if req.priority >= 0: + + # allow only one build in given builder at once + if not lock.lock("building-rpm-for-%s" % config.builder, non_block = 1): + return + # don't kill server + check_load() + # not more then job_slots builds at once + locked = 0 + for slot in 
range(config.job_slots): + if lock.lock("building-rpm-slot-%d" % slot, non_block = 1): + locked = 1 + break + if not locked: + return + + # record fact that we got lock for this builder, load balancer + # will use it for fair-queuing + l = lock.lock("got-lock") + f = open(path.got_lock_file, "a") + f.write(config.builder + "\n") + f.close() + l.close() + else: + msg = "HIGH PRIORITY: " + + msg += "handling request %s (%d) for %s from %s, priority %s" \ + % (req.id, req.no, config.builder, req.requester, req.priority) + log.notice(msg) + status.push(msg) + handle_request(req) + status.pop() + + def otherreqs(r): + if r.no==req.no: + return False + else: + return True + + q = B_Queue(path.queue_file + "-" + config.builder) + q.lock(0) + q.read() + previouslen=len(q.requests) + q.requests=filter(otherreqs, q.requests) + if len(q.requests)&1" % + (config.nice, builder_opts, b.bconds_string(), b.branch, + tag_test, b.kernel_string(), b.spec)) + util.append_to(b.logfile, "request from: %s" % r.requester) + util.append_to(b.logfile, "started at: %s" % time.asctime()) + util.append_to(b.logfile, "building SRPM using: %s\n" % cmd) + res = chroot.run(cmd, logfile = b.logfile) + util.append_to(b.logfile, "exit status %d" % res) + files = util.collect_files(b.logfile) + if len(files) > 0: + if len(files) > 1: + util.append_to(b.logfile, "error: More than one file produced: %s" % files) + res = "FAIL_TOOMANYFILES" + last = files[len(files) - 1] + b.src_rpm_file = last + b.src_rpm = os.path.basename(last) + r.chroot_files.extend(files) + else: + util.append_to(b.logfile, "error: No files produced.") + res = "FAIL" + if res == 0 and not "test-build" in r.flags: + for pref in config.tag_prefixes: + util.append_to(b.logfile, "Tagging with prefix: %s" % pref) + res = chroot.run("cd rpm/packages; ./builder -r %s -Tp %s -Tv %s" % \ + (b.branch, pref, b.spec), logfile = b.logfile) + if res == 0: + transfer_file(r, b) + + packagedir = "rpm/packages/%s" % b.spec[:-5] + packagename = b.spec[:-5] + chroot.run("rpm/packages/builder -m %s" % \ + (b.spec,), logfile = b.logfile) + chroot.run("rm -rf %s" % packagedir, logfile = b.logfile) + status.pop() + + if res: + res = "FAIL" + return res + +def handle_request(r): + os.mkdir(path.srpms_dir + '/' + r.id) + os.chmod(path.srpms_dir + '/' + r.id, 0755) + ftp.init(r) + buildlogs.init(r) + build.build_all(r, build_srpm) + report.send_report(r, is_src = True) + report.send_cia_report(r, is_src = True) + store_binary_request(r) + ftp.flush() + notify.send(r) + +def main(): + init_conf("src") + if lock("building-srpm", non_block = 1) == None: + return + while True: + status.push("srpm: processing queue") + q = B_Queue(path.queue_file) + if not q.lock(1): + status.pop() + return + q.read() + if q.requests == []: + q.unlock() + status.pop() + return + r = pick_request(q) + q.write() + q.unlock() + status.pop() + status.push("srpm: handling request from %s" % r.requester) + handle_request(r) + status.pop() + +if __name__ == '__main__': + loop.run_loop(main) diff --git a/pld-builder.new/PLD_Builder/status.py b/pld-builder.new/PLD_Builder/status.py new file mode 100644 index 0000000..8591d18 --- /dev/null +++ b/pld-builder.new/PLD_Builder/status.py @@ -0,0 +1,15 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +state = [] +email = "" +admin = "" +builder_list = "" + +def push(s): + state.append(s) + +def pop(): + state.pop() + +def get(): + return "%s" % state diff --git a/pld-builder.new/PLD_Builder/stopwatch.py b/pld-builder.new/PLD_Builder/stopwatch.py new file mode 100644 index 
0000000..151c164 --- /dev/null +++ b/pld-builder.new/PLD_Builder/stopwatch.py @@ -0,0 +1,45 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import time +import resource + +class Time: + def __init__(self): + x = resource.getrusage(resource.RUSAGE_CHILDREN) + self.user_time = x[0] + self.sys_time = x[1] + self.non_io_faults = x[6] + self.io_faults = x[7] + self.time = time.time() + + def sub(self, x): + self.user_time -= x.user_time + self.sys_time -= x.sys_time + self.non_io_faults -= x.non_io_faults + self.io_faults -= x.io_faults + self.time -= x.time + + def format(self): + return "user:%.2fs sys:%.2fs real:%.2fs (faults io:%d non-io:%d)" % \ + (self.user_time, self.sys_time, self.time, self.io_faults, + self.non_io_faults) + +class Timer: + def __init__(self): + self.starts = [] + + def start(self): + self.starts.append(Time()) + + def stop(self): + tmp = Time() + tmp.sub(self.starts.pop()) + return tmp.format() + +t = Timer() + +def start(): + t.start() + +def stop(): + return t.stop() diff --git a/pld-builder.new/PLD_Builder/util.py b/pld-builder.new/PLD_Builder/util.py new file mode 100644 index 0000000..316a8f1 --- /dev/null +++ b/pld-builder.new/PLD_Builder/util.py @@ -0,0 +1,75 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import re +import sys +import os +import log +import string + +def uuid_python(): + return str(uuid_random()) + +def uuid_external(): + f = os.popen("uuidgen 2>&1") + u = string.strip(f.read()) + f.close() + if len(u) != 36: + raise Exception, "uuid: fatal, cannot generate uuid: %s" % u + return u + +# uuid module available in python >= 2.5 +try: + from uuid import uuid4 as uuid_random +except ImportError: + uuid = uuid_external +else: + uuid = uuid_python + +def pkg_name(nvr): + return re.match(r"(.+)-[^-]+-[^-]+", nvr).group(1) + +def msg(m): + sys.stderr.write(m) + +def sendfile(src, dst): + cnt = 0 + while 1: + s = src.read(10000) + if s == "": break + cnt += len(s) + dst.write(s) + return cnt + +def append_to(log, msg): + f = open(log, "a") + f.write("%s\n" % msg) + f.close() + +def clean_tmp(dir): + # FIXME: use python + os.system("rm -f %s/* 2>/dev/null; rmdir %s 2>/dev/null" % (dir, dir)) + +def collect_files(log): + f = open(log, 'r') + rx = re.compile(r"^Wrote: (/home.*\.rpm)$") + files = [] + for l in f.xreadlines(): + m = rx.search(l) + if m: + files.append(m.group(1)) + f.close() + return files + +def find_last_section(log): + f = open(log, 'r') + rx1 = re.compile(r"^Executing\(%(\w+)\).*$") + rx2 = re.compile(r"^Processing (files):.*$") + last_section = None + for l in f: + m = rx1.search(l) + if not m: + m = rx2.search(l) + if m: + last_section = m.group(1) + f.close() + return last_section diff --git a/pld-builder.new/PLD_Builder/wrap.py b/pld-builder.new/PLD_Builder/wrap.py new file mode 100644 index 0000000..b9809a3 --- /dev/null +++ b/pld-builder.new/PLD_Builder/wrap.py @@ -0,0 +1,56 @@ +# vi: encoding=utf-8 ts=8 sts=4 sw=4 et + +import sys +import log +import traceback +import StringIO +import os +import time + +# this module, as it deals with internal error handling shouldn't +# import anything beside status +import status + +try: + import mailer + def sendmail(trace): + m = mailer.Message() + m.set_headers(to = status.admin, cc = "%s, %s" % (status.email, status.builder_list), subject = "fatal python exception") + m.write("%s\n" % trace) + m.write("during: %s\n" % status.get()) + m.send() +except: + def sendmail(trace): + # don't use mailer.py; it safer this way + f = os.popen("/usr/sbin/sendmail -i -t", "w") + f.write("""Subject: builder 
failure
+To: %s
+Cc: %s, %s
+Date: %s
+X-PLD-Builder: fatal error report
+
+%s
+
+during: %s
+""" % (status.admin, status.email, status.builder_list,
+       time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+       trace, status.get()))
+        f.close()
+
+def wrap(main):
+    try:
+        main()
+    except:
+        exctype, value = sys.exc_info()[:2]
+        if exctype == SystemExit:
+            sys.exit(value)
+        s = StringIO.StringIO()
+        traceback.print_exc(file = s, limit = 20)
+
+        log.alert("fatal python exception")
+        log.alert(s.getvalue())
+        log.alert("during: %s" % status.get())
+
+        sendmail(s.getvalue())
+
+        sys.exit(1)
diff --git a/pld-builder.new/README b/pld-builder.new/README
new file mode 100644
index 0000000..cc9fb47
--- /dev/null
+++ b/pld-builder.new/README
@@ -0,0 +1,54 @@
+Roadmap:
+
+  Makefile -- nothing interesting.
+
+  PLD_Builder/ -- all python stuff lives here.
+
+  admin/ -- scripts for the chroot admin.
+
+  architektura.txt -- docs, in Polish.
+
+  client/ -- scripts for clients, i.e. developers sending requests
+
+  config/ -- configuration
+
+  go.py -- testing script, don't touch
+
+  bin/ -- scripts for the builder account outside the chroot
+
+    request-handler.sh
+      Script to be run from procmail on the src builder. It processes an
+      incoming request, authorizes it and stores it in spool/queue.
+
+    src-builder.sh
+      Script to be run from cron on the src builder. It looks for new
+      requests in spool/queue, gets them from there, builds an SRPM, and
+      stores a request for the binary builders in spool/req_queue (which is
+      mirrored in the www/ directory, signed and compressed). SRPMs and
+      buildlogs are queued for transmission.
+
+    request-fetcher.sh
+      Run on a binary builder.
+      Fetches queue.gz from the src builder, and distributes requests to all
+      builders hosted on the given account (to spool/queue-<builder> files).
+
+    rpm-builder.sh
+      Run on a binary builder.
+      Tries to acquire locks for <builder> and for a job-slot. If that
+      succeeds, processes one request from spool/queue-<builder>.
+
+    load-balancer.sh
+      Run on a binary builder.
+      Checks spool/got_lock, then runs rpm-builder.sh in the order determined
+      from this file (if b1 had the lock before b2 last time, first run
+      rpm-builder.sh b1 and then rpm-builder.sh b2), so builders get
+      the same number of requests.
+
+    file-sender.sh
+      Run on both binary and src builders.
+      Sends files queued in spool/{ftp,buildlogs}.
+
+Working directories:
+  lock/
+  spool/
+  log/
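All of these scripts funnel into the same spool-file access pattern implemented by PLD_Builder/bqueue.py. A condensed sketch of that pattern as the daemons use it (existing calls only, bare intra-package imports, assuming the working directory is the builder root as the bin/ wrappers arrange):

    import path
    from bqueue import B_Queue

    # the canonical queue transaction: lock, read, mutate, write back, unlock
    q = B_Queue(path.queue_file)
    q.lock(0)       # take the queue lock
    q.read()        # parse pending requests
    # ... append or filter q.requests here ...
    q.write()       # write the queue back out
    q.unlock()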
diff --git a/pld-builder.new/README.bin-builder b/pld-builder.new/README.bin-builder
new file mode 100644
index 0000000..89a7b77
--- /dev/null
+++ b/pld-builder.new/README.bin-builder
@@ -0,0 +1,86 @@
+new bin builder setup
+
+packages and chroot
+~~~~~~~~~~~~~~~~~~~
+1. install pld-builder from the th repository on the target host
+
+2. create chroot /srv/chroot
+(you can symlink to the real destination for the sake of copy-paste from here)
+mkdir -p /srv/chroot
+
+- if you're using rpm < 4.5-29, link the system rpmdb to the target rpmdb,
+  as rpm otherwise gets it all very messy. see LP#395177.
+mv /var/lib/rpm{,-host}
+ln -s /srv/chroot/var/lib/rpm /var/lib/rpm
+install -d /srv/chroot/var/lib/rpm
+rpm -r /srv/chroot --initdb
+
+otherwise it's just:
+rpm -r /srv/chroot --initdb
+
+- install the distro gpg key, as by default th config packages are signed and
+  signature verification is enabled in the config:
+rpm -r /srv/chroot --import /etc/pki/rpm-gpg/PLD-*.asc
+
+- setup a minimal /dev
+install -d /srv/chroot/dev
+cp -a /dev/{full,null,random,urandom,zero} /srv/chroot/dev
+
+- install vserver-packages; as it is usually hidden, you must unhide it
+  with --noignore:
+poldek -r /srv/chroot -u vserver-packages -Q --noignore
+
+- install pld-builder-chroot from the th repos
+poldek -r /srv/chroot -u pld-builder-chroot -Q --sn th --sn th-ready
+
+- make the rpmdb readable for the builder user
+chmod -R a+rX /srv/chroot/var/lib/rpm
+
+- setup /srv/chroot/etc/resolv.conf so you can work with poldek if you enter
+  the chroot manually
+cat /etc/resolv.conf > /srv/chroot/etc/resolv.conf
+
+- restore the rpmdb hack
+rm -f /var/lib/rpm
+mv /var/lib/rpm{-host,}
+
+gpg keys
+~~~~~~~~
+1. import the src builder key to the bin builder so it can download queue.gz
+
+src-builder$ gpg --export builder-th-src@pld-linux.org --armor > th-src.asc
+bin-builder$ gpg --import < th-src.asc
+
+2. generate a new key for the bin builder and import it to the src builder so
+   it can accept spool/notify messages
+
+3. import that public key to the src builder keyring
+bin-builder$ gpg --gen-key
+bin-builder$ gpg --export KEYID --armor > th-i686.asc
+src-builder$ gpg --import < th-i686.asc
+
+ssh keys
+~~~~~~~~
+
+generate a key on the bin builder and add it to authorized_keys of the ftp
+account, i.e. the account where you push your uploads:
+[th-i686]
+ftp_url = scp://fpldth@ep09.pld-linux.org:ftp/.tree/.incoming/i686/
+
+bin-builder$ ssh-keygen
+bin-builder$ ssh-copy-id -i .ssh/id_rsa.pub fpldth@ep09.pld-linux.org
+
+buildlogs
+~~~~~~~~~
+buildlogs are copied with rsync. ask the buildlogs.pld-linux.org admin to
+allow your ip; you also need to set up the password used to authenticate,
+in rsync-passwords
+
+sudo access
+~~~~~~~~~~~
+make sure the builder user (who runs the cron jobs) can sudo chroot to the
+chroots:
+builder ALL=(ALL) NOPASSWD: /usr/sbin/chroot /home/users/builder/chroot-th *
+
+testing
+~~~~~~~
+
+keep an eye on /var/lib/pld-builder/spool/log with tail -f and
+run the cronjobs under the builder account.
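The key exchange above is what the bin builder's fetch path actually relies on; the check boils down to this existing logic, condensed from PLD_Builder/request_fetcher.py (queue_data stands in for the decompressed queue.gz body):

    import gpg
    import request
    from acl import acl

    # accept the queue only if it is signed by a user who may sign queues
    (signers, body) = gpg.verify_sig(queue_data)
    user = acl.user_by_email(signers)
    if user == None or not user.can_do("sign_queue", "all"):
        raise SystemExit("queue.gz not signed by an authorized user")
    requests = request.parse_requests(body)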
diff --git a/pld-builder.new/TODO b/pld-builder.new/TODO
new file mode 100644
index 0000000..3ba8da0
--- /dev/null
+++ b/pld-builder.new/TODO
@@ -0,0 +1,46 @@
+- Write a spec preprocessor that processes a given spec file and expands
+  macros. Next we should process the output, look for all BuildRequires:
+  lines, and install them, instead of running rpmbuild --nobuild to see
+  what's missing, since that chokes when stuff is used in %(...), on missing
+  includes and so on.
+
+  get_br.py is close, but doesn't handle:
+  %if foo
+  BuildRequires: ...
+  %endif
+  which in fact requires an expression parser :<
+
+- implement:
+    shell code
+    (run poldek --up; poldek -Q --shcmd 'upgrade -G *')
+  First requires command-shell:, second command-upgrade:.
+  This should be a class in requester.py, with kind='command'; also look for
+  kind checking elsewhere in the code (grep for 'kind.*group')
+
+- make the default configs reasonable
+
+- fix make-request.sh to be more user-friendly and configurable
+
+- add the possibility of tagging stuff for the source builder
+
+- jabber daemon. it should connect to a jabber server and log in to a
+  conference room. Next it should open a unix socket, so other scripts can
+  write to it, and the daemon forwards this to the conference room.
+
+  A next step for such a daemon would be to create a second unix socket,
+  where scripts log what they are doing, so we can do things like tail the
+  current buildlog.
+
+- add log.debug (log only with special options) to log.py, and use it
+
+- if a magic comment is found in the spec: single-build -- acquire all
+  job-slots
+
+- allow blocking some packages
+
+- fetch the src.rpm once for all builders on this account; separate fetching
+  of src.rpms, so we can build and fetch at the same time
+
+- unify buildlogs.py and ftp.py, both are file queues
+
+- ability to just copy the src.rpm from ftp and make it the base for a
+  request to bin builders
diff --git a/pld-builder.new/admin/.cvsignore b/pld-builder.new/admin/.cvsignore
new file mode 100644
index 0000000..670005b
--- /dev/null
+++ b/pld-builder.new/admin/.cvsignore
@@ -0,0 +1 @@
+tmp-chroot
diff --git a/pld-builder.new/admin/fresh-queue.sh b/pld-builder.new/admin/fresh-queue.sh
new file mode 100755
index 0000000..5d80086
--- /dev/null
+++ b/pld-builder.new/admin/fresh-queue.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+
+umask 077
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+if [ -d "$BUILDERPATH" ]; then
+	cd "$BUILDERPATH"
+else
+	echo "the $BUILDERPATH directory does not exist"
+	exit 1
+fi
+
+
+if [ -f "$BUILDERPATH/config/global" ]; then
+	. $BUILDERPATH/config/global
+fi
+
+if [ "$1" != "y" ] ; then
+	echo "this script kills the current queue and installs a new one"
+	echo "run '$0 y' to run it"
+	exit 1
+fi
+
+mkdir -p spool/{builds,buildlogs,notify,ftp} www/srpms lock
+echo 0 > www/max_req_no
+echo 0 > spool/last_req_no
+echo -n > spool/processed_ids
+echo -n > spool/got_lock
+echo '' > spool/queue
+echo '' > spool/req_queue
+test !
-z "$binary_builders" && for bb in $binary_builders; do + echo '' > spool/queue-$bb +done + +chmod 755 www www/srpms +chmod 644 www/max_req_no diff --git a/pld-builder.new/admin/install-chroot.sh b/pld-builder.new/admin/install-chroot.sh new file mode 100755 index 0000000..937b877 --- /dev/null +++ b/pld-builder.new/admin/install-chroot.sh @@ -0,0 +1,218 @@ +#!/bin/sh + +DIST="th" +DISTTAG="PLD 3.0 (Th)" + +die () { + echo "$0: $*" 1>&2 + cat 1>&2 </ (required in src builder) + git_user= (required in src builder) + builder_uid=2000 (optional, uid of builder user + in chroot; defaults to current uid) +EOF + exit 1 +} + +default_config () { + builder_pkgs="rpm-build poldek pwdutils net-tools which rpm-perlprov rpm-php-pearprov rpm-pythonprov bash vim" + builder_uid=`id -u` + dist_url="ftp://ftp.$DIST.pld-linux.org" + + case "$chroot_type" in + src ) + builder_arch_pkgs="wget gawk git-core" + ;; + bin ) + builder_arch_pkgs="mount" + ;; + esac +} + +check_conf () { + test "$chroot_dir" || die "no chroot_dir" + test "$arch" || die "no arch" + test "$dist_url" || die "no dist_url" + + case "$chroot_type" in + src ) + test "$git_server" || die "no git_server" + test "$git_user" || die "no git_user" + ;; + bin ) + ;; + * ) + die "evil chroot_type: $chroot_type" + ;; + esac +} + +poldek_src () { + if test "$1" ; then + cat <install-specs <~/.rpmmacros +echo "%vendor PLD">>~/.rpmmacros +echo "%distribution $DISTTAG">>~/.rpmmacros +git config --global user.name $git_user +git config --global user.email ${git_user}@pld-linux.org +EOF + chb "sh" < install-specs + rm install-specs + echo "WARNING: Do not forget to install ssh keys to access git repo" +} + +install_build_tree () { + cat >install-bt <~/.rpmmacros +echo "%vendor PLD">>~/.rpmmacros +echo "%distribution $DISTTAG">>~/.rpmmacros +EOF + chb "sh" < install-bt + rm install-bt +} + + + + +eval "$*" || usage +default_config +eval "$*" +check_conf + +rm -rf tmp-chroot +mkdir tmp-chroot +cd tmp-chroot + +cat >poldek.conf < install-$chroot_name.sh <poldek.conf < /etc/resolv.conf" < /etc/resolv.conf +chr "cat > /etc/mtab" < /dev/null +chr "mkdir -p /spools/ready/" < /dev/null +chr "mkdir -p /spools/poldek/" < /dev/null +chr "sed -e 's,^\(root:.*\)/bin/sh$,\1/bin/bash,' -i~ /etc/passwd" + + +case $chroot_type in + src ) + install_SPECS_builder + ;; + bin ) + install_build_tree + ;; +esac diff --git a/pld-builder.new/bin/file-sender.sh b/pld-builder.new/bin/file-sender.sh new file mode 100755 index 0000000..5babb4c --- /dev/null +++ b/pld-builder.new/bin/file-sender.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +umask 022 +export LC_CTYPE=en_US.iso-8859-1 + +CONFIG=$HOME/.pldbuilderrc +[ -f "$CONFIG" ] && . $CONFIG +[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/" +export BUILDERPATH + +cd $BUILDERPATH +exec python PLD_Builder/file_sender.py diff --git a/pld-builder.new/bin/load-balancer.sh b/pld-builder.new/bin/load-balancer.sh new file mode 100755 index 0000000..055306a --- /dev/null +++ b/pld-builder.new/bin/load-balancer.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +umask 022 +export LC_CTYPE=en_US.iso-8859-1 + +CONFIG=$HOME/.pldbuilderrc +[ -f "$CONFIG" ] && . 
diff --git a/pld-builder.new/bin/file-sender.sh b/pld-builder.new/bin/file-sender.sh
new file mode 100755
index 0000000..5babb4c
--- /dev/null
+++ b/pld-builder.new/bin/file-sender.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/file_sender.py
diff --git a/pld-builder.new/bin/load-balancer.sh b/pld-builder.new/bin/load-balancer.sh
new file mode 100755
index 0000000..055306a
--- /dev/null
+++ b/pld-builder.new/bin/load-balancer.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/load_balancer.py
diff --git a/pld-builder.new/bin/maintainer.sh b/pld-builder.new/bin/maintainer.sh
new file mode 100755
index 0000000..e490b02
--- /dev/null
+++ b/pld-builder.new/bin/maintainer.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/maintainer.py
diff --git a/pld-builder.new/bin/request-fetcher.sh b/pld-builder.new/bin/request-fetcher.sh
new file mode 100755
index 0000000..1b446f7
--- /dev/null
+++ b/pld-builder.new/bin/request-fetcher.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/request_fetcher.py
diff --git a/pld-builder.new/bin/request-handler.sh b/pld-builder.new/bin/request-handler.sh
new file mode 100755
index 0000000..8c2ab85
--- /dev/null
+++ b/pld-builder.new/bin/request-handler.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+if lockfile -r3 $HOME/.builder_request_handler.lock 2>/dev/null; then
+	trap "rm -f $HOME/.builder_request_handler.lock" 1 2 3 13 15
+	cd $BUILDERPATH
+	python PLD_Builder/request_handler.py
+	rm -f $HOME/.builder_request_handler.lock
+else
+	exit 1
+fi
diff --git a/pld-builder.new/bin/rpm-builder.sh b/pld-builder.new/bin/rpm-builder.sh
new file mode 100755
index 0000000..7e678e2
--- /dev/null
+++ b/pld-builder.new/bin/rpm-builder.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/rpm_builder.py $1
diff --git a/pld-builder.new/bin/src-builder.sh b/pld-builder.new/bin/src-builder.sh
new file mode 100755
index 0000000..c17e36b
--- /dev/null
+++ b/pld-builder.new/bin/src-builder.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/srpm_builder.py
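
All the bin/ wrappers above follow one pattern: source ~/.pldbuilderrc if it exists, default BUILDERPATH to ~/pld-builder.new/, cd there, and exec the corresponding Python daemon; request-handler.sh additionally serializes itself with the lockfile(1) utility. A minimal rc file, inferred from the variables the wrappers read (contents are an assumption, not shipped config):

	# ~/.pldbuilderrc
	BUILDERPATH="$HOME/pld-builder.new/"
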
diff --git a/pld-builder.new/chroot/rpmqa-chroot b/pld-builder.new/chroot/rpmqa-chroot
new file mode 100755
index 0000000..588b04c
--- /dev/null
+++ b/pld-builder.new/chroot/rpmqa-chroot
@@ -0,0 +1,11 @@
+#!/bin/sh
+# generate rpm -qa without locking database
+
+set -e
+umask 077
+dir=`mktemp -d /tmp/db-XXXXXX`
+cp /var/lib/rpm/Packages $dir
+cp /var/lib/rpm/Name $dir
+rpm --dbpath $dir -qa | sort
+rm -rf $dir
+
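
rpmqa-chroot exists because rpm locks its database while a transaction runs, so a plain rpm -qa inside a busy build chroot could block; copying just the Packages and Name indices to a temp directory lets the query run against the copy. A hypothetical use from the host side (the chroot path is an example):

	chroot /home/pld/chroot-src rpmqa-chroot > installed.lst
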
diff --git a/pld-builder.new/client/kde4send2builders.sh b/pld-builder.new/client/kde4send2builders.sh
new file mode 100755
index 0000000..1db89d2
--- /dev/null
+++ b/pld-builder.new/client/kde4send2builders.sh
@@ -0,0 +1,156 @@
+#!/bin/sh
+# Authors:
+# - Bartosz Świątek (shadzik@pld-linux.org)
+# - Elan Ruusamäe (glen@pld-linux.org)
+#
+# helps send kde4 specs in the proper order, with or without autotags
+
+die() {
+	echo >&2 "$0: $*"
+	exit 1
+}
+
+usage() {
+	echo "Usage: $0 OPTIONS SPECS"
+	echo ""
+	echo "Where OPTIONS are:"
+	echo ""
+	echo " -d --distro VALUE"
+	echo "    set the distro; th or ti are the most likely values"
+	echo " -at --with-auto-tag"
+	echo "    send with the current autotag, default no"
+	echo " -b --builder VALUE"
+	echo "    choose a particular builder, default all"
+	echo " -p --priority VALUE (default: 2)"
+	echo " -h --help"
+	echo "    show this help"
+	echo ""
+	echo "Choose SPECS out of:"
+	echo ""
+	echo "all - all kde4-* (libs, base, other, koffice, l10n)"
+	echo "libs - kde4-kdelibs and kde4-kdepimlibs"
+	echo "base - kde4-kdebase* kde4-oxygen-icons"
+	echo "other - all other kde4-* except libs and base"
+	echo "koffice - kde4-koffice"
+	echo "l10n - kde4-l10n"
+	echo "kdevelop - kde4-kdevplatform, kde4-kdevelop-*"
+	echo "almost-all - all but koffice and l10n"
+	echo ""
+	exit 0
+}
+
+DIST=
+ATAG=no
+SENDPRIO=
+BUILDER=
+PRIO=2
+#SPECDIR=$(rpm -E %_specdir)
+SPECDIR=~/rpm
+
+LIBS="kde4-kdelibs.spec kde4-kdepimlibs.spec"
+BASE="kde4-oxygen-icons.spec kde4-kdebase-runtime.spec kde4-kdebase-workspace.spec kde4-kwebkitpart.spec kde4-kdebase.spec"
+OTHER="kde4-kdemultimedia.spec kde4-kdegraphics.spec \
+kde4-kdenetwork.spec \
+kde4-kdepim.spec \
+kde4-kdepim-runtime.spec \
+kde4-kdeartwork.spec \
+kde4-kdewebdev.spec \
+kde4-kdeutils.spec \
+kde4-kdeaccessibility.spec \
+kde4-kdebindings.spec \
+kde4-kdegames.spec \
+kde4-kdeedu.spec \
+kde4-kdeplasma-addons.spec \
+kde4-kdesdk.spec \
+kde4-kdeadmin.spec \
+kde4-kdetoys.spec"
+KOFFICE="kde4-koffice.spec kde4-koffice-l10n.spec"
+L10N="kde4-l10n.spec"
+KDEVELOP="kde4-kdevplatform.spec \
+kde4-kdevelop.spec \
+kde4-kdevelop-plugin-php.spec"
+
+while [ $# -gt 0 ]; do
+	case "$1" in
+	--distro | -d )
+		DIST=$2
+		shift
+		;;
+	--with-auto-tag | -at )
+		ATAG=yes
+		;;
+	--builder | -b )
+		BUILDER="$BUILDER $2"
+		shift
+		;;
+	--priority | -p )
+		PRIO=$2
+		shift
+		;;
+	--help | -h )
+		usage
+		;;
+	-* )
+		die "Unknown option: $1"
+		;;
+	*:* | * )
+		specs="$specs $1"
+		;;
+	esac
+	shift
+done
+
+specs=`for s in $specs; do
+	case "$s" in
+	all) # all kde4 specs
+		echo $LIBS $BASE $OTHER $KOFFICE $L10N
+		;;
+	libs) # kde4 libs, libs-experimental and pimlibs
+		echo $LIBS
+		;;
+	base) # kde4-kdebase-*
+		echo $BASE
+		;;
+	other) # kde4-*
+		echo $OTHER
+		;;
+	koffice) # kde4-koffice
+		echo $KOFFICE
+		;;
+	l10n) # kde4-l10n
+		echo $L10N
+		;;
+	kdevelop) # kde4-kdevplatform and kde4-kdevelop-*
+		echo $KDEVELOP
+		;;
+	almost-all) # all but koffice and l10n
+		echo $LIBS $BASE $OTHER
+		;;
+	*) # not listed ones
+		echo $s
+		;;
+	esac
+done`
+
+if [ "$DIST" = "ti-dev" ]; then
+	disttag="ti"
+else
+	disttag=$DIST
+fi
+
+if [ "$ATAG" = "yes" ]; then
+	for spec in $specs; do
+		PKG=$(echo $spec | sed -e 's/.spec//g')
+		LAST_AUTOTAG=$(cd $SPECDIR/packages && ./builder -g -ns $PKG/$spec >/dev/null 2>&1 && cvs status -v $PKG/$spec | awk -vdist=$disttag '!/Sticky/ && $1 ~ "^auto-" dist "-"{if (!a++) print $1}')
+		sleep 1
+		SENDPRIO="$SENDPRIO $spec:$LAST_AUTOTAG "
+	done
+else
+	SENDPRIO=$specs
+fi
+
+dir=$(dirname "$0")
+exec $dir/make-request.sh ${DIST:+-d $DIST} ${BUILDER:+-b "$BUILDER"} -p $PRIO -r $SENDPRIO
+echo >&2 "Failed to execute ./make-request.sh!"
+exit 1
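
Example invocations of the helper above (distro and builder names are hypothetical):

	# queue kde4 libs and base on th, with autotags, at priority 1
	./client/kde4send2builders.sh -d th -at -p 1 libs base
	# everything except koffice and l10n, on one specific builder
	./client/kde4send2builders.sh -d th -b th-x86_64 almost-all
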
diff --git a/pld-builder.new/client/make-request.sh b/pld-builder.new/client/make-request.sh
new file mode 100755
index 0000000..35b94f9
--- /dev/null
+++ b/pld-builder.new/client/make-request.sh
@@ -0,0 +1,788 @@
+#!/bin/sh
+
+# prevent "*" from being expanded in builders var
+set -f
+
+builders=
+with=
+without=
+flags=
+command=
+command_flags=
+gpg_opts=
+default_branch='HEAD'
+dist=
+url=
+no_depend=no
+verbose=no
+autotag=no
+
+if [ -x /usr/bin/python ]; then
+	send_mode="python"
+else
+	echo "No python present, using mail mode"
+	send_mode="mail"
+fi
+
+if [ -n "$HOME_ETC" ]; then
+	USER_CFG=$HOME_ETC/.requestrc
+else
+	USER_CFG=$HOME/.requestrc
+fi
+
+if [ ! -f "$USER_CFG" ]; then
+	echo "Creating config file $USER_CFG. You *must* edit it."
+	cat > $USER_CFG <<EOF
+EOF
+fi
+
+msg() {
+	echo >&2 "${c_star}*${c_norm} $*"
+}
+
+red() {
+	echo "${c_red}$*${c_norm}"
+}
+
+die() {
+	echo >&2 "$0: $*"
+	exit 1
+}
+
+send_request() {
+	# switch to mail mode, if no url set
+	[ -z "$url" ] && send_mode="mail"
+
+	case "$send_mode" in
+	"mail")
+		msg "Sending using mail mode"
+		cat - | $mailer
+		;;
+	*)
+		msg "Sending using http mode to $url"
+		cat - | python -c '
+import sys, socket, urllib2
+
+try:
+    data = sys.stdin.read()
+    url = sys.argv[1]
+    socket.setdefaulttimeout(10)
+    req = urllib2.Request(url, data)
+    f = urllib2.urlopen(req)
+    f.close()
+except Exception, e:
+    print >> sys.stderr, "Problem while sending request via HTTP: %s: %s" % (url, e)
+    sys.exit(1)
+print >> sys.stdout, "Request queued via HTTP."
+' "$url"
+		;;
+	esac
+}
+
+# simple df_fetcher, based on packages/fetchsrc_request
+# TODO: tcp (smtp) mode
+# TODO: adjust for ~/.requestrc config
+df_fetch() {
+	local specs="$@"
+
+	# Sending by
+	local MAILER='/usr/sbin/sendmail'
+	# MAILER='/usr/bin/msmtp'
+	# Sending via
+	local VIA="SENDMAIL"
+	#VIA="localhost"
+	local VIA_ARGS=""
+	#VIA_ARGS="some additional flags"
+	# e.g. for msmtp:
+	# VIA_ARGS='-a gmail'
+	#
+	# DISTFILES EMAIL
+	local DMAIL="distfiles@pld-linux.org"
+
+	local HOST=$(hostname -f)
+	local LOGIN=${requester%@*}
+
+	for spec in $specs; do
+		local SPEC=$(echo "$spec" | sed -e 's|:.*||')
+		local BRANCH=$(echo "$spec" | sed -e 's|.*:||')
+		echo >&2 "Distfiles Request: $SPEC:$BRANCH via $MAILER ${VIA_ARGS:+ ($VIA_ARGS)}"
+		cat <<-EOF | "$MAILER" -t -i $VIA_ARGS
		To: $DMAIL
		From: $LOGIN <$LOGIN@$HOST>
		Subject: fetchsrc_request notify
		X-CVS-Module: SPECS
		X-distfiles-request: yes
		X-Login: $LOGIN
		X-Spec: $SPEC
		X-Branch: $BRANCH
		X-Flags: force-reply

		.
		EOF
+	done
+}
+
+# autotag from rpm-build-macros
+# displays latest used tag for a specfile
+autotag() {
+	local out s
+	for s in "$@"; do
+		# strip branches
+		s=${s%:*}
+		# ensure package ends with .spec
+		s=${s%.spec}.spec
+		out=$(cvs status -v $s | awk "!/Sticky/&&/auto-$dist-/{if (!a++) print \$1}")
+		echo "$s:$out"
+	done
+}
+
+# get autotag for specs
+# WARNING: This may checkout some files from CVS
+get_autotag() {
+	local pkg spec rpmdir
+
+	rpmdir=$(rpm -E %_topdir)
+	cd $rpmdir
+	for pkg in "$@"; do
+		# strip branches
+		pkg=${pkg%:*}
+		# strip .spec extension
+		pkg=${pkg%.spec}
+		# checkout only if missing
+		if [ ! -e $pkg/$pkg.spec ]; then
+			$rpmdir/builder -g $pkg -ns -r HEAD 1>&2
+		fi
+		if [ ! -e $pkg/$pkg.spec ]; then
+			# just print it out, to fallback to base pkg name
+			echo "$pkg"
+		else
+			autotag $pkg/$pkg.spec
+		fi
+	done
+}
+
+usage() {
+	cat <<EOF
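
The make-request.sh listing continues past this excerpt, and the ~/.requestrc template it writes is not visible here. Judging only by the variables the script reads (requester, mailer, url, dist, default_branch), a config could plausibly look like this sketch, with every value hypothetical:

	# ~/.requestrc
	requester=jdoe@pld-linux.org
	default_branch=HEAD
	dist=th
	url=http://src-builder.example.org:1234/
	mailer="/usr/sbin/sendmail -t"
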