--- /dev/null
+*.pyc
+lock
+log
+spool
+www
--- /dev/null
+PACKAGE := pld-builder
+VERSION := 0.5
+SNAP := $(shell date +%Y%m%d)
+
+# for make dist
+CVSROOT := :pserver:cvs@cvs.pld-linux.org:/cvsroot
+CVSMODULE := pld-builder.new
+CVSTAG := HEAD
+
+all:
+ python -c "import compileall; compileall.compile_dir('.')"
+
+clean:
+ find -name '*.pyc' | xargs rm -f
+
+dist:
+ rm -rf $(PACKAGE)-$(VERSION).$(SNAP)
+ mkdir -p $(PACKAGE)-$(VERSION).$(SNAP)
+ cvs -d $(CVSROOT) export -d $(PACKAGE)-$(VERSION).$(SNAP) -r $(CVSTAG) $(CVSMODULE)
+ tar -cjf $(PACKAGE)-$(VERSION).$(SNAP).tar.bz2 $(PACKAGE)-$(VERSION).$(SNAP)
+ rm -rf $(PACKAGE)-$(VERSION).$(SNAP)
+ test -x ./dropin && ./dropin $(PACKAGE)-$(VERSION).$(SNAP).tar.bz2
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import ConfigParser
+import string
+import fnmatch
+import os
+import stat
+
+import path
+import log
+import status
+from mailer import Message
+from config import config
+
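+# Illustrative acl.conf entry (format inferred from the parsing code
+# below; the login and addresses are hypothetical):
+#
+#   [jdoe]
+#   gpg_emails = jdoe@example.org
+#   mailto = jdoe@example.org
+#   privs = src:src-builder binary:th-* !binary:ac-*
+#
+# Each priv is "what:where[:branch]"; a missing branch defaults to "*",
+# a leading "!" denies instead of allows, branch may be a comma-separated
+# list, and fnmatch-style wildcards are accepted in the patterns.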
+class User:
+ def __init__(self, p, login):
+ self.login = login
+ self.privs = []
+ self.gpg_emails = []
+ self.mailto = ""
+
+ if p.has_option(login, "gpg_emails"):
+ self.gpg_emails = string.split(p.get(login, "gpg_emails"))
+ else:
+ log.panic("acl: [%s] has no gpg_emails" % login)
+
+ if p.has_option(login, "mailto"):
+ self.mailto = p.get(login, "mailto")
+ else:
+ if len(self.gpg_emails) > 0:
+ self.mailto = self.gpg_emails[0]
+
+ if p.has_option(login, "privs"):
+            for priv in string.split(p.get(login, "privs")):
+                l = string.split(priv, ":")
+                if len(l) == 2:
+                    priv += ":*"
+                if len(l) not in (2,3) or l[0] == "" or l[1] == "":
+                    log.panic("acl: invalid priv format: '%s' [%s]" % (priv, login))
+                else:
+                    self.privs.append(priv)
+ else:
+ log.panic("acl: [%s] has no privs" % login)
+
+ def can_do(self, what, where, branch=None):
+ if branch:
+ action = "%s:%s:%s" % (what, where, branch)
+ else:
+ action = "%s:%s:N-A" % (what, where)
+ for priv in self.privs:
+ if priv[0] == "!":
+ ret = 0
+ priv = priv[1:]
+ else:
+ ret = 1
+ pwhat,pwhere,pbranch=priv.split(":")
+ for pbranch in pbranch.split(","):
+ priv="%s:%s:%s" % (pwhat,pwhere,pbranch)
+ if fnmatch.fnmatch(action, priv):
+ return ret
+ return 0
+
+ def check_priority(self, prio, where):
+ for priv in self.privs:
+ val,builder=priv.split(":")[0:2]
+ if fnmatch.fnmatch(where, builder):
+ try:
+ val=int(val)
+ except ValueError:
+ continue
+ if prio>=val:
+ return prio
+ else:
+ return val
+ return prio
+
+ def mail_to(self):
+ return self.mailto
+
+ def message_to(self):
+ m = Message()
+ m.set_headers(to = self.mail_to(), cc = config.builder_list)
+ return m
+
+ def get_login(self):
+ return self.login
+
+class ACL_Conf:
+ def __init__(self):
+ self.reload()
+
+ def try_reload(self):
+ mtime = os.stat(path.acl_conf)[stat.ST_MTIME]
+ if mtime != self.acl_conf_mtime:
+ log.notice("acl.conf has changed, reloading...")
+ self.reload()
+ return True
+ return False
+
+ def reload(self):
+ self.acl_conf_mtime = os.stat(path.acl_conf)[stat.ST_MTIME]
+ self.current_user = None
+ status.push("reading acl.conf")
+ p = ConfigParser.ConfigParser()
+ p.readfp(open(path.acl_conf))
+ self.users = {}
+ for login in p.sections():
+ if self.users.has_key(login):
+ log.panic("acl: duplicate login: %s" % login)
+ continue
+ user = User(p, login)
+ for e in user.gpg_emails:
+ if self.users.has_key(e):
+ log.panic("acl: user email colision %s <-> %s" % \
+ (self.users[e].login, login))
+ else:
+ self.users[e] = user
+ self.users[login] = user
+ status.pop()
+
+ def user_by_email(self, ems):
+ for e in ems:
+ if self.users.has_key(e):
+ return self.users[e]
+ return None
+
+ def user(self, l):
+ if not self.users.has_key(l):
+ log.panic("no such user: %s" % l)
+ return self.users[l]
+
+ def set_current_user(self, u):
+ self.current_user = u
+ if u != None:
+ status.email = u.mail_to()
+
+ def current_user_login(self):
+ if self.current_user != None:
+ return self.current_user.login
+ else:
+ return ""
+
+acl = ACL_Conf()
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import re
+import gzip
+import time
+import StringIO
+import os
+import fcntl
+import string
+import tempfile
+
+# PLD_Builder:
+import gpg
+import request
+import util
+import log
+
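+# B_Queue represents an on-disk request queue: read()/write() operate on
+# the plain XML form, while read_signed()/write_signed() handle the
+# GPG-clearsigned (and optionally gzip-compressed) variant.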
+class B_Queue:
+ def __init__(self, filename):
+ self.name = filename
+ self.requests = []
+ self.fd = None
+
+ def dump(self, fname):
+ (fdno, tmpfname) = tempfile.mkstemp(dir=os.path.dirname(fname))
+ f = os.fdopen(fdno, "w")
+ self.requests.reverse()
+ for r in self.requests:
+ r.dump(f)
+ self.requests.reverse()
+ f.flush()
+ os.fsync(f.fileno())
+ f.close()
+ os.chmod(tmpfname, 0644)
+ os.rename(tmpfname, fname)
+
+ def dump_html(self, fname):
+ (fdno, tmpfname) = tempfile.mkstemp(dir=os.path.dirname(fname))
+ f = os.fdopen(fdno, "w")
+ f.write("""
+<html>
+ <head>
+ <link rel="Shortcut Icon" href="http://www.pld-linux.org/favicon.ico"/>
+ <title>PLD builder queue</title>
+ <link rel="stylesheet" type="text/css" charset="utf-8" media="all" href="style.css">
+ <script type="text/javascript" src="script.js"></script>
+ </head>
+<body>\n"""
+ )
+ self.requests.reverse()
+ for r in self.requests:
+ r.dump_html(f)
+ self.requests.reverse()
+ f.write("</body></html>\n")
+ f.flush()
+ os.fsync(f.fileno())
+ f.close()
+ os.chmod(tmpfname, 0644)
+ os.rename(tmpfname, fname)
+
+ # read possibly compressed, signed queue
+ def read_signed(self):
+ if re.search(r"\.gz$", self.name):
+ f = gzip.open(self.name)
+ else:
+ f = open(self.name)
+ (signers, body) = gpg.verify_sig(f.read())
+ self.signers = signers
+ self.requests = request.parse_requests(body)
+
+ def _open(self):
+ if self.fd == None:
+ if os.access(self.name, os.F_OK):
+ self.fd = open(self.name, "r+")
+ else:
+ self.fd = open(self.name, "w+")
+
+ def read(self):
+ self._open()
+ self.signers = []
+ body = self.fd.read()
+ if string.strip(body) == "":
+ # empty file, don't choke
+ self.requests = []
+ return
+ try:
+ self.requests = request.parse_requests(body)
+        except Exception, e:
+            log.panic("error parsing %s: %s" % (self.name, e))
+
+ def _write_to(self, f):
+ f.write("<queue>\n")
+ for r in self.requests:
+ r.write_to(f)
+ f.write("</queue>\n")
+
+ def write(self):
+ self._open()
+ self.fd.seek(0)
+ self.fd.truncate(0)
+ self._write_to(self.fd)
+ self.fd.flush()
+
+ def lock(self, no_block):
+ self._open()
+ op = fcntl.LOCK_EX
+ if no_block:
+ op = op + fcntl.LOCK_NB
+ try:
+ fcntl.flock(self.fd, op)
+ return 1
+ except IOError:
+ return 0
+
+ def unlock(self):
+ fcntl.flock(self.fd, fcntl.LOCK_UN)
+
+ def write_signed(self, name):
+ sio = StringIO.StringIO()
+ self._write_to(sio)
+ sio.seek(0)
+ sio.write(gpg.sign(sio.read()))
+ sio.seek(0)
+ (fdno, tmpname) = tempfile.mkstemp(dir=os.path.dirname(name))
+ f = os.fdopen(fdno, "w")
+ if re.search(r"\.gz$", name):
+ fgz = gzip.GzipFile(filename=name, mode="w", compresslevel=6, fileobj=f)
+ util.sendfile(sio, fgz)
+ fgz.close()
+ else:
+ util.sendfile(sio, f)
+ f.flush()
+ os.fsync(f.fileno())
+ f.close()
+ os.chmod(tmpname, 0644)
+ os.rename(tmpname, name)
+
+ def add(self, req):
+ self.requests.append(req)
+
+ def value(self):
+ return self.requests
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import string
+import os
+import atexit
+
+import notify
+import path
+import util
+import chroot
+import stopwatch
+import report
+import log
+import buildlogs
+import status
+from config import config, init_conf
+
+
+
+def run_command(batch):
+ # we want to keep "skip" in queue.html
+ command = batch.command
+
+    # rewrite the special "skip:BUILD_ID" command into touch
+ if command[:5] == "skip:":
+ c = ""
+ for id in command[5:].split(','):
+ if os.path.isdir(path.srpms_dir + '/' + id):
+ c = c + "echo skip:%s;\n" % (id)
+ c = c + "touch %s/%s/skipme;\n" % (path.srpms_dir, id)
+ else:
+ c = c + "echo %s is not valid build-id;\n" % (id)
+ command = c
+
+ if "no-chroot" in batch.command_flags:
+        # TODO: appending to the logfile via this shell redirection hack should be done in Python
+ c = "(%s) >> %s 2>&1" % (command, batch.logfile)
+ f = os.popen(c)
+ for l in f.xreadlines():
+ pass
+ r = f.close()
+ if r == None:
+ return 0
+ else:
+ return r
+ else:
+ user = "root"
+ if "as-builder" in batch.command_flags:
+ user = "builder"
+ return chroot.run(command, logfile = batch.logfile, user = user)
+
+def build_all(r, build_fnc):
+ status.email = r.requester_email
+ notify.begin(r)
+ tmp = path.build_dir + '/' + util.uuid() + "/"
+ r.tmp_dir = tmp
+ os.mkdir(tmp)
+ atexit.register(util.clean_tmp, tmp)
+
+ log.notice("started processing %s" % r.id)
+ r.chroot_files = []
+ r.some_ok = 0
+ for batch in r.batches:
+ can_build = 1
+ failed_dep = ""
+ for dep in batch.depends_on:
+ if dep.build_failed:
+ can_build = 0
+ failed_dep = dep.spec
+
+ if batch.is_command() and can_build:
+ batch.logfile = tmp + "command"
+ if config.builder in batch.builders:
+ log.notice("running %s" % batch.command)
+ stopwatch.start()
+ batch.build_failed = run_command(batch)
+ if batch.build_failed:
+ log.notice("running %s FAILED" % batch.command)
+ notify.add_batch(batch, "FAIL")
+ else:
+ r.some_ok = 1
+ log.notice("running %s OK" % batch.command)
+ notify.add_batch(batch, "OK")
+ batch.build_time = stopwatch.stop()
+ report.add_pld_builder_info(batch)
+ buildlogs.add(batch.logfile, failed = batch.build_failed, id=r.id)
+ else:
+ log.notice("not running command, not for me.")
+ batch.build_failed = 0
+ batch.log_line("queued command %s for other builders" % batch.command)
+ r.some_ok = 1
+ buildlogs.add(batch.logfile, failed = batch.build_failed, id=r.id)
+ elif can_build:
+ log.notice("building %s" % batch.spec)
+ stopwatch.start()
+ batch.logfile = tmp + batch.spec + ".log"
+ batch.gb_id=r.id
+ batch.requester=r.requester
+ batch.requester_email=r.requester_email
+ batch.build_failed = build_fnc(r, batch)
+ if batch.build_failed:
+ log.notice("building %s FAILED (%s)" % (batch.spec, batch.build_failed))
+ notify.add_batch(batch, batch.build_failed)
+ else:
+ r.some_ok = 1
+ log.notice("building %s OK" % (batch.spec))
+ notify.add_batch(batch, "OK")
+ batch.build_time = stopwatch.stop()
+ report.add_pld_builder_info(batch)
+ buildlogs.add(batch.logfile, failed = batch.build_failed, id=r.id)
+ else:
+ batch.build_failed = 1
+ batch.skip_reason = "SKIPED [%s failed]" % failed_dep
+ batch.logfile = None
+ batch.build_time = ""
+ log.notice("building %s %s" % (batch.spec, batch.skip_reason))
+ notify.add_batch(batch, "SKIP")
+
+ buildlogs.flush()
+ chroot.run("rm -f %s" % string.join(r.chroot_files))
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import path
+import time
+import os
+import re
+import log
+
+from config import config
+import util
+
+class Buildlogs_Queue:
+ def __init__(self):
+ self.queue = []
+ self.some_failed = 0
+ self.requester_email = None
+
+ def init(self, g):
+ self.requester_email = g.requester_email
+
+ def add(self, logfile, failed, id):
+ # if /dev/null, don't even bother to store it
+ if config.buildlogs_url == "/dev/null":
+ return
+ blogfile = os.path.basename(logfile)
+ name = re.sub(r"\.spec\.log", "", blogfile) + "," + id + ".bz2"
+ ret = os.system("bzip2 --best --force < %s > %s" \
+ % (logfile, path.buildlogs_queue_dir + '/' + config.builder + '.' + id + '.' + blogfile))
+ if ret:
+ log.error("bzip2 compression of %s failed; does bzip2 binary exist?" % (logfile))
+
+ if failed: s = "FAIL"
+ else: s = "OK"
+ f = open(path.buildlogs_queue_dir + '/' + config.builder + '.' + id + '.' + blogfile + ".info", "w")
+ f.write("Status: %s\nEND\n" % s)
+ f.close()
+
+ self.queue.append({'name': name, 'id': config.builder + '.' + id + '.' + blogfile, 'failed': failed})
+
+ def flush(self):
+ def desc(l):
+ return """Target: %s/%s
+Builder: %s
+Time: %d
+Type: buildlog
+Requester: %s
+END
+""" % (config.buildlogs_url, l['name'], config.builder, time.time(), self.requester_email)
+
+ q = self.queue[:]
+ for l in q:
+ f = open(path.buildlogs_queue_dir + '/' + l['id'] + ".desc.tmp", "w")
+ f.write(desc(l))
+ f.close()
+ os.rename(path.buildlogs_queue_dir + '/' + l['id'] + ".desc.tmp", path.buildlogs_queue_dir + '/' + l['id'] + ".desc")
+ self.queue.remove(l)
+
+queue = Buildlogs_Queue()
+
+def init(r):
+ queue.init(r)
+
+def add(logfile, failed, id):
+ "Add new buildlog with specified status."
+ queue.add(logfile, failed, id)
+
+def flush():
+ "Send buildlogs to server."
+ queue.flush()
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import os
+import re
+import random
+
+try:
+ from hashlib import md5 as md5
+except ImportError:
+ from md5 import md5
+
+from config import config
+
+def quote(cmd):
+ return re.sub("([\"\\\\$`])", r"\\\1", cmd)
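+# Illustrative behaviour of quote() (input hypothetical):
+#   quote('echo "$HOME"')  ->  echo \"\$HOME\"
+# i.e. ", \, $ and ` get backslash-escaped so the command can be embedded
+# safely in the double-quoted su -c "..." strings built below.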
+
+def command(cmd, user = None, nostdin=""):
+ if user == None:
+ user = config.builder_user
+ if nostdin:
+ nostdin = "exec < /dev/null; "
+ return "%s sudo chroot %s su - %s -c \"export LC_ALL=C; %s %s\"" \
+ % (config.sudo_chroot_wrapper, config.chroot, user, nostdin, quote(cmd))
+
+def command_sh(cmd):
+ return "%s sudo chroot %s /bin/sh -c \"export LC_ALL=C; exec < /dev/null; %s\"" \
+ % (config.sudo_chroot_wrapper, config.chroot, quote(cmd))
+
+def popen(cmd, user = "builder", mode = "r"):
+ f = os.popen(command(cmd, user), mode)
+ return f
+
+def run(cmd, user = "builder", logfile = None, logstdout = None):
+ c = command(cmd, user, nostdin=True)
+ if logfile != None:
+ if logstdout != None:
+ c = "%s 2>&1 | /usr/bin/tee -a %s" % (c, logfile)
+ else:
+ c = "%s >> %s 2>&1" % (c, logfile)
+ f = os.popen(c)
+ if logstdout != None:
+ for l in f:
+ logstdout.write(l)
+ r = f.close()
+ if r == None:
+ return 0
+ else:
+ return r
+
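+# copy `file` from inside the chroot to `outfile` outside it by streaming
+# the contents between unique digest markers, so shell noise around the
+# cat cannot be mistaken for file data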
+def cp(file, outfile, user="builder", rm=False):
+ m = md5()
+ m.update(str(random.sample(xrange(100000), 500)))
+ digest = m.hexdigest()
+
+ marker_start = "--- FILE BEGIN DIGEST %s ---" % digest
+ marker_end = "--- FILE END DIGEST %s ---" % digest
+
+ f = open(outfile, 'wb')
+ cmd = "echo \"%s\"; cat %s; echo \"%s\"" % (marker_start, file, marker_end)
+ if rm:
+ cmd += "; rm %s" % file
+ c = command(cmd, user)
+ p = os.popen(c)
+ # get file contents
+ marker = False
+ for l in p:
+ if not marker and l.strip() == marker_start:
+ marker = True
+ continue
+ me = l.find(marker_end)
+ if me != -1:
+ l = l[:me]
+ f.write(l)
+ marker = False
+ break
+ if marker:
+ f.write(l)
+ rp = p.close()
+ rf = f.close()
+ if rp == None:
+ return 0
+ else:
+ return rp
--- /dev/null
+ </results>
+ </builder>
+ </body>
+</message>
--- /dev/null
+<message>
+ <generator>
+ <name>PLD Linux Builder client for CIA</name>
+ <version>0.001</version>
+ <url>http://cvs.pld-linux.org/pld-builder.new/PLD_Builder/report.py</url>
+ </generator>
+ <source>
+ <project>pld</project>
+ <branch>Ac</branch>
+ </source>
+ <body>
+ <builder>
+ <results>
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import ConfigParser
+import string
+import os
+import syslog
+
+import path
+import log
+import status
+
+
+syslog_facilities = {
+ 'kern': syslog.LOG_KERN,
+ 'user': syslog.LOG_USER,
+ 'mail': syslog.LOG_MAIL,
+ 'daemon': syslog.LOG_DAEMON,
+ 'auth': syslog.LOG_AUTH,
+ 'lpr': syslog.LOG_LPR,
+ 'news': syslog.LOG_NEWS,
+ 'uucp': syslog.LOG_UUCP,
+ 'cron': syslog.LOG_CRON,
+ 'local0': syslog.LOG_LOCAL0,
+ 'local1': syslog.LOG_LOCAL1,
+ 'local2': syslog.LOG_LOCAL2,
+ 'local3': syslog.LOG_LOCAL3,
+ 'local4': syslog.LOG_LOCAL4,
+ 'local5': syslog.LOG_LOCAL5,
+ 'local6': syslog.LOG_LOCAL6,
+ 'local7': syslog.LOG_LOCAL7
+}
+
+class Builder_Conf:
+ def __init__(self):
+ self.done = 0
+ pass
+
+ def read(self, builder):
+ p = ConfigParser.ConfigParser()
+ def get(o, d = None):
+ if p.has_option(builder, o):
+ return string.strip(p.get(builder, o))
+ elif p.has_option("all", o):
+ return string.strip(p.get("all", o))
+ elif d != None:
+ return d
+ else:
+ log.panic("cannot find %s::%s" % (builder, o))
+
+ p.readfp(open(path.builder_conf))
+
+ self.admin_email = get("admin_email")
+ self.email = self.admin_email
+
+ if p.has_option("all", "syslog"):
+ f = p.get("all", "syslog")
+ if f != "":
+ if syslog_facilities.has_key(f):
+ log.open_syslog("builder", syslog_facilities[f])
+ else:
+ log.panic("no such syslog facility: %s" % f)
+
+ if builder == "src":
+ builder = get("src_builder", builder)
+ self.builder = builder
+
+ self.binary_builders = string.split(get("binary_builders"))
+ self.src_builder = string.strip(get("src_builder", ""))
+ self.tag_prefixes = string.split(get("tag_prefixes", ""))
+ self.max_keep_time = int(get("max_keep_time", 168))*60*60
+ self.bot_email = get("bot_email", "")
+ self.control_url = get("control_url")
+ self.request_handler_server_port = int(get("request_handler_server_port", 1234))
+ self.builder_list = get("builder_list", "")
+ self.gen_upinfo = get("gen_upinfo", "yes")
+ if self.gen_upinfo == 'no':
+ self.gen_upinfo = False
+ else:
+ self.gen_upinfo = True
+ status.admin = self.admin_email
+ status.builder_list = self.builder_list
+ self.max_jobs = int(get("max_jobs"))
+
+ if builder == "all":
+ return
+
+ self.email = get("email")
+ if builder not in p.sections():
+ log.panic("builder %s not in config file" % builder)
+ self.arch = get("arch")
+ self.chroot = get("chroot")
+ self.buildlogs_url = get("buildlogs_url", "/dev/null")
+ self.ftp_url = get("ftp_url")
+ self.notify_url = get("notify_url")
+ self.test_ftp_url = get("test_ftp_url", "/dev/null")
+ self.rpmqa_url = get("rpmqa_url", "/dev/null")
+ self.rpmqa_filename = get("rpmqa_filename")
+ self.job_slots = int(get("job_slots"))
+ self.max_load = float(get("max_load"))
+ self.rpm_cache_dir = get("rpm_cache_dir", "/spools/ready")
+ self.builder_user = get("builder_user", "builder")
+ self.sudo_chroot_wrapper = get("sudo_chroot_wrapper", "")
+ self.nice = get("nice", "0")
+
+ f = get("syslog", "")
+ if f != "":
+ if syslog_facilities.has_key(f):
+ log.open_syslog(self.builder, syslog_facilities[f])
+ else:
+ log.panic("no such syslog facility: %s" % f)
+
+ self.done = 1
+
+config = Builder_Conf()
+
+def init_conf(builder=None):
+ os.environ['LC_ALL'] = "C"
+ status.push("reading builder config")
+ log.builder = builder
+ if not builder:
+ builder = "all"
+ config.read(builder)
+ log.builder = config.builder
+ status.pop()
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import string
+from chroot import *
+from util import *
+
+__all__ = ['compute_deps', 'remove_list']
+
+def compute_deps():
+ """Compute dependenecies between RPM installed on system.
+
+ Return dictionary from name of package to list of packages required by it.
+ Produce some warnings and progress information to stderr.
+ """
+ # pkg-name -> list of stuff returned by rpm -qR
+ rpm_req = {}
+ # --whatprovides ...
+ rpm_prov = {}
+ # list of required files
+ req_files = {}
+
+ def get_req():
+ msg("rpm-req... ")
+ f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{REQUIRENAME}\n]'")
+ cur_pkg = None
+ while 1:
+ l = f.readline()
+ if l == "": break
+ l = string.strip(l)
+ if l == "@":
+ cur_pkg = string.strip(f.readline())
+ rpm_req[cur_pkg] = []
+ continue
+ rpm_req[cur_pkg].append(l)
+ if l[0] == '/':
+ req_files[l] = 1
+ f.close()
+ msg("done\n")
+
+ def add_provides(pkg, what):
+ if rpm_prov.has_key(what):
+ msg("[%s: %s, %s] " % (what, rpm_prov[what], pkg))
+ else:
+ rpm_prov[what] = pkg
+
+ def get_prov():
+ msg("rpm-prov... ")
+ f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{PROVIDENAME}\n]'")
+ cur_pkg = None
+ while 1:
+ l = f.readline()
+ if l == "": break
+ l = string.strip(l)
+ if l == "@":
+ cur_pkg = string.strip(f.readline())
+ continue
+ add_provides(cur_pkg, l)
+ if l[0] == '/':
+ # already provided
+ del req_files[l]
+ f.close()
+ msg("done\n")
+
+ def get_prov_files():
+ msg("rpm-files... ")
+ f = chr_popen("rpm -qa --qf '@\n%{NAME}\n[%{FILENAMES}\n]'")
+ cur_pkg = None
+ while 1:
+ l = f.readline()
+ if l == "": break
+ l = string.strip(l)
+ if l == "@":
+ cur_pkg = string.strip(f.readline())
+ continue
+ if req_files.has_key(l):
+ add_provides(cur_pkg, l)
+ f.close()
+ msg("done\n")
+
+ def compute():
+ msg("computing deps... ")
+ for pkg, reqs in rpm_req.items():
+ pkg_reqs = []
+ for req in reqs:
+ if req[0:7] == "rpmlib(": continue
+ if rpm_prov.has_key(req):
+ if rpm_prov[req] not in pkg_reqs:
+ pkg_reqs.append(rpm_prov[req])
+ else:
+ msg("[%s: %s] " % (pkg, req))
+ requires[pkg] = pkg_reqs
+ msg("done\n")
+
+ # map from pkg-name to list of pkg-names required by it
+ # this is result
+ requires = {}
+
+ get_req()
+ get_prov()
+ get_prov_files()
+ compute()
+ return requires
+
+def remove_list(req, need):
+ """List of packages scheduled for removal.
+
+    Given dependency information and a list of needed packages, compute
+    the list of packages that don't need to be present.
+ """
+ need_m = {}
+ def close(n):
+ if need_m.has_key(n): return
+ need_m[n] = 1
+ if not req.has_key(n): return
+ for k in req[n]:
+ close(k)
+ for n in need: close(n)
+ rm = []
+ for p in req.keys():
+ if not need_m.has_key(p): rm.append(p)
+ return rm
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import glob
+import re
+import string
+import os
+import time
+import shutil
+import sys
+import traceback
+import urllib2
+
+from config import config, init_conf
+import mailer
+import path
+import log
+import loop
+import status
+import lock
+
+retries_times = [5 * 60, 5 * 60, 10 * 60, 10 * 60, 30 * 60, 60 * 60]
+
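+# Each queued file is accompanied by a "<id>.desc" file of "Name: value"
+# lines terminated by END, as written by the queue modules, e.g. (values
+# hypothetical):
+#
+#   Target: rsync://example.org/buildlogs/foo.bz2
+#   Builder: th-x86_64
+#   Time: 1234567890
+#   Type: buildlog
+#   Requester: user@example.org
+#   END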
+def read_name_val(file):
+ f = open(file)
+ r = {'_file': file[:-5], '_desc': file}
+ rx = re.compile(r"^([^:]+)\s*:(.*)$")
+ for l in f.xreadlines():
+ if l == "END\n":
+ f.close()
+ return r
+ m = rx.search(l)
+ if m:
+ r[m.group(1)] = string.strip(m.group(2))
+ else:
+ break
+ f.close()
+ return None
+
+def scp_file(src, target):
+ global problems
+ f = os.popen("scp -v -B %s %s 2>&1 < /dev/null" % (src, target))
+ p = f.read()
+ ret = f.close()
+ if ret:
+ problems[src] = p
+ return ret
+
+def copy_file(src, target):
+ try:
+ shutil.copyfile(src, target)
+ return 0
+ except:
+ global problems
+ exctype, value = sys.exc_info()[:2]
+ problems[src] = "cannot copy file: %s" % traceback.format_exception_only(exctype, value)
+ return 1
+
+def rsync_file(src, target, host):
+ global problems
+
+ p = open(path.rsync_password_file, "r")
+ password = ""
+ for l in p.xreadlines():
+ l = string.split(l)
+ if len(l) >= 2 and l[0] == host:
+ password = l[1]
+ p.close()
+
+    # NOTE: redirecting stdin to /dev/null does not make rsync skip the
+    # password prompt; it opens /dev/tty and still asks when a password is
+    # needed but missing, so we always set the RSYNC_PASSWORD env var
+ os.environ["RSYNC_PASSWORD"] = password
+ rsync = "rsync --verbose --archive --timeout=360 --contimeout=360"
+ f = os.popen("%s %s %s 2>&1" % (rsync, src, target))
+ p = f.read()
+ ret = f.close()
+ if ret:
+ problems[src] = p
+ del os.environ["RSYNC_PASSWORD"];
+ return ret
+
+def rsync_ssh_file(src, target):
+ global problems
+ rsync = "rsync --verbose --archive --timeout=360 -e ssh"
+ f = os.popen("%s %s %s 2>&1 < /dev/null" % (rsync, src, target))
+ p = f.read()
+ ret = f.close()
+ if ret:
+ problems[src] = p
+ return ret
+
+def post_file(src, url):
+ global problems
+ try:
+ f = open(src, 'r')
+ data = f.read()
+ f.close()
+ req = urllib2.Request(url, data)
+ req.add_header('X-Filename', os.path.basename(src))
+ f = urllib2.urlopen(req)
+ f.close()
+ except Exception, e:
+ problems[src] = e
+ return e
+ return 0
+
+def send_file(src, target):
+ global problems
+ try:
+ log.notice("sending %s to %s (size %d bytes)" % (src, target, os.stat(src).st_size))
+ m = re.match('rsync://([^/]+)/.*', target)
+ if m:
+ return not rsync_file(src, target, host = m.group(1))
+ if target != "" and target[0] == '/':
+ return not copy_file(src, target)
+ m = re.match('scp://([^@:]+@[^/:]+)(:|)(.*)', target)
+ if m:
+ return not scp_file(src, m.group(1) + ":" + m.group(3))
+ m = re.match('ssh\+rsync://([^@:]+@[^/:]+)(:|)(.*)', target)
+ if m:
+ return not rsync_ssh_file(src, m.group(1) + ":" + m.group(3))
+ m = re.match('http://.*', target)
+ if m:
+ return not post_file(src, target)
+ log.alert("unsupported protocol: %s" % target)
+ except OSError, e:
+ problems[src] = e
+ log.error("send_file(%s, %s): %s" % (src, target, e))
+ return False
+ return True
+
+def maybe_flush_queue(dir):
+ retry_delay = 0
+ try:
+ f = open(dir + "/retry-at")
+ last_retry = int(string.strip(f.readline()))
+ retry_delay = int(string.strip(f.readline()))
+ f.close()
+ if last_retry + retry_delay > time.time():
+ return
+ os.unlink(dir + "/retry-at")
+ except:
+ pass
+
+ status.push("flushing %s" % dir)
+
+ if flush_queue(dir):
+ f = open(dir + "/retry-at", "w")
+ if retry_delay in retries_times:
+ idx = retries_times.index(retry_delay)
+ if idx < len(retries_times) - 1: idx += 1
+ else:
+ idx = 0
+ f.write("%d\n%d\n" % (time.time(), retries_times[idx]))
+ f.close()
+
+ status.pop()
+
+def flush_queue(dir):
+ q = []
+ os.chdir(dir)
+ for f in glob.glob(dir + "/*.desc"):
+ d = read_name_val(f)
+ if d != None: q.append(d)
+ def mycmp(x, y):
+ rc = cmp(x['Time'], y['Time'])
+ if rc == 0 and x.has_key('Type') and y.has_key('Type'):
+ return cmp(x['Type'], y['Type'])
+ else:
+ return rc
+ q.sort(mycmp)
+
+ error = None
+ # copy of q
+ remaining = q[:]
+ for d in q:
+ if not send_file(d['_file'], d['Target']):
+ error = d
+ continue
+ if os.access(d['_file'] + ".info", os.F_OK):
+ if not send_file(d['_file'] + ".info", d['Target'] + ".info"):
+ error = d
+ continue
+ os.unlink(d['_file'] + ".info")
+ os.unlink(d['_file'])
+ os.unlink(d['_desc'])
+ remaining.remove(d)
+
+ if error != None:
+ emails = {}
+ emails[config.admin_email] = 1
+ pr = ""
+ for src, msg in problems.iteritems():
+ pr = pr + "[src: %s]\n\n%s\n" % (src, msg)
+ for d in remaining:
+ if d.has_key('Requester'):
+ emails[d['Requester']] = 1
+ e = emails.keys()
+ m = mailer.Message()
+ m.set_headers(to = string.join(e, ", "),
+ subject = "[%s] builder queue problem" % config.builder)
+ m.write("there were problems sending files from queue %s:\n" % dir)
+ m.write("problems:\n")
+ m.write("%s\n" % pr)
+ m.send()
+ log.error("error sending files from %s:\n%s\n" % (dir, pr))
+ return 1
+
+ return 0
+
+problems = {}
+
+def main():
+ if lock.lock("sending-files", non_block = 1) == None:
+ return
+ init_conf()
+ maybe_flush_queue(path.notify_queue_dir)
+ maybe_flush_queue(path.buildlogs_queue_dir)
+ maybe_flush_queue(path.ftp_queue_dir)
+
+if __name__ == '__main__':
+ loop.run_loop(main)
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import path
+import os
+import shutil
+import time
+
+from config import config
+import util
+
+class FTP_Queue:
+ def __init__(self):
+ self.queue = None
+ self.some_failed = 0
+ self.status = ""
+
+ def init(self, g=None, rpmqa=False):
+ self.queue = []
+ if rpmqa:
+ self.requester_email = config.admin_email
+ self.url = config.rpmqa_url
+ else:
+ self.requester_email = g.requester_email
+ if "test-build" in g.flags:
+ self.url = config.test_ftp_url
+ else:
+ self.url = config.ftp_url
+
+ def add(self, file, type):
+ # if /dev/null, say bye bye
+ if self.url == "/dev/null":
+ return
+ name = os.path.basename(file)
+ id = util.uuid()
+ shutil.copy(file, path.ftp_queue_dir + '/' + id)
+ self.queue.append({'name': name, 'id': id, 'type': type})
+ st = os.stat(path.ftp_queue_dir + '/' + id)
+ self.status += "%10d %s\n" % (st.st_size, name)
+
+ def flush(self):
+ def desc(l):
+ return """Target: %s/%s
+Builder: %s
+Time: %d
+Type: %s
+Requester: %s
+END
+""" % (self.url, l['name'], config.builder, time.time(), l['type'], self.requester_email)
+
+ for l in self.queue:
+ f = open(path.ftp_queue_dir + '/' + l['id'] + ".desc", "w")
+ f.write(desc(l))
+ f.close()
+
+ def kill(self):
+ for l in self.queue:
+            os.unlink(path.ftp_queue_dir + '/' + l['id'])
+
+queue = FTP_Queue()
+
+def add(f, type="rpm"):
+ queue.add(f, type)
+
+def flush():
+ queue.flush()
+
+def kill():
+ queue.kill()
+
+def init(r=None, rpmqa=False):
+ queue.init(r, rpmqa)
+
+def status():
+ return queue.status
+
+def clear_status():
+ queue.status = ""
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import re
+import string
+import xreadlines
+from util import *
+
+
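+# Extract BuildRequires from a spec file, expanding %{?cond:...}-style
+# conditionals and simple %{macro} references. Illustrative expansion
+# (macro names hypothetical):
+#   %{?_with_gui:BuildRequires: qt-devel}  ->  BuildRequires: qt-devel
+# when "gui" is listed in bconds_with (which defines the _with_gui macro).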
+def get_build_requires(spec, bconds_with, bconds_without):
+ cond_rx = re.compile(r"%\{(\!\?|\?\!|\?)([a-zA-Z0-9_+]+)\s*:([^%\{\}]*)\}")
+
+ def expand_conds(l):
+ def expand_one(m):
+ if m.group(1) == "?":
+ if macros.has_key(m.group(2)):
+ return m.group(3)
+ else:
+ if not macros.has_key(m.group(2)):
+ return m.group(3)
+ return ""
+
+ for i in range(10):
+ l = cond_rx.sub(expand_one, l)
+ if len(l) > 1000: break
+
+ return l
+
+ macro_rx = re.compile(r"%\{([a-zA-Z0-9_+]+)\}")
+ def expand_macros(l):
+ def expand_one(m):
+ if macros.has_key(m.group(1)):
+ return string.strip(macros[m.group(1)])
+ else:
+ return m.group(0) # don't change
+
+ for i in range(10):
+ l = macro_rx.sub(expand_one, l)
+ if len(l) > 1000: break
+
+ return expand_conds(l)
+
+ simple_br_rx = re.compile(r"^BuildRequires\s*:\s*([^\s]+)", re.I)
+ bcond_rx = re.compile(r"^%bcond_(with|without)\s+([^\s]+)")
+ version_rx = re.compile(r"^Version\s*:\s*([^\s]+)", re.I)
+ release_rx = re.compile(r"^Release\s*:\s*([^\s]+)", re.I)
+ name_rx = re.compile(r"^Name\s*:\s*([^\s]+)", re.I)
+ define_rx = re.compile(r"^\%define\s+([a-zA-Z0-9_+]+)\s+(.*)", re.I)
+ any_br_rx = re.compile(r"BuildRequires", re.I)
+
+ macros = {}
+ for b in bconds_with:
+ macros["_with_%s" % b] = 1
+ for b in bconds_without:
+ macros["_without_%s" % b] = 1
+
+ macros["__perl"] = "/usr/bin/perl"
+ macros["_bindir"] = "/usr/bin"
+ macros["_sbindir"] = "/usr/sbin"
+ macros["kgcc_package"] = "gcc"
+
+ build_req = []
+
+ f = open(spec)
+ for l in xreadlines.xreadlines(f):
+ l = string.strip(l)
+ if l == "%changelog": break
+
+ # %bcond_with..
+ m = bcond_rx.search(l)
+ if m:
+ bcond = m.group(2)
+ if m.group(1) == "with":
+ if macros.has_key("_with_%s" % bcond):
+ macros["with_%s" % bcond] = 1
+ else:
+ if not macros.has_key("_without_%s" % bcond):
+ macros["with_%s" % bcond] = 1
+ continue
+
+ # name,version,release
+ m = version_rx.search(l)
+ if m: macros["version"] = m.group(1)
+ m = release_rx.search(l)
+ if m: macros["release"] = m.group(1)
+ m = name_rx.search(l)
+ if m: macros["name"] = m.group(1)
+
+ # %define
+ m = define_rx.search(l)
+ if m: macros[m.group(1)] = m.group(2)
+
+ # *BuildRequires*
+ if any_br_rx.search(l):
+ l = expand_macros(l)
+ m = simple_br_rx.search(l)
+ if m:
+ build_req.append(m.group(1))
+ else:
+ if l <> "" and l[0] <> '#':
+ msg("spec error (%s): %s\n" % (spec, l))
+
+ for x in build_req:
+ print x
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import log
+import subprocess
+import re
+import StringIO
+
+import util
+import os
+import pipeutil
+
+def get_keys(buf):
+ """Extract keys from gpg message
+
+ """
+
+ if not os.path.isfile('/usr/bin/gpg'):
+ log.error("missing gnupg binary: /usr/bin/gpg")
+ raise OSError, 'Missing gnupg binary'
+
+ d_stdout = None
+ d_stderr = None
+ cmd = ['/usr/bin/gpg', '--batch', '--no-tty', '--decrypt']
+ gpg_run = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
+ try:
+ d_stdout, d_stderr = gpg_run.communicate(buf.encode('utf-8'))
+ except OSError, e:
+ log.error("gnupg run, does gpg binary exist? : %s" % e)
+ raise
+
+ rx = re.compile("^gpg: Signature made .*using [DR]SA key ID (.+)")
+ keys = []
+
+ for l in d_stderr.split('\n'):
+ m = rx.match(l)
+ if m:
+ keys.append(m.group(1))
+
+ return keys
+
+def verify_sig(buf):
+ """Check signature.
+
+ Given email as file-like object, return (signer-emails, signed-body).
+ where signer-emails is lists of strings, and signed-body is StringIO
+ object.
+ """
+
+ if not os.path.isfile('/usr/bin/gpg'):
+ log.error("missing gnupg binary: /usr/bin/gpg")
+ raise OSError, 'Missing gnupg binary'
+
+ d_stdout = None
+ d_stderr = None
+ cmd = ['/usr/bin/gpg', '--batch', '--no-tty', '--decrypt']
+ gpg_run = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
+ try:
+ d_stdout, d_stderr = gpg_run.communicate(buf.encode('utf-8'))
+ except OSError, e:
+ log.error("gnupg run failed, does gpg binary exist? : %s" % e)
+ raise
+
+ rx = re.compile("^gpg: (Good signature from| aka) .*<([^>]+)>")
+ emails = []
+ for l in d_stderr.split('\n'):
+ m = rx.match(l)
+ if m:
+ emails.append(m.group(2))
+ if not emails:
+ log.error("gnupg signature check failed: %s" % d_stderr)
+ return (emails, d_stdout)
+
+def sign(buf):
+ if not os.path.isfile('/usr/bin/gpg'):
+ log.error("missing gnupg binary: /usr/bin/gpg")
+ raise OSError, 'Missing gnupg binary'
+
+ d_stdout = None
+ d_stderr = None
+ cmd = ['/usr/bin/gpg', '--batch', '--no-tty', '--clearsign']
+ gpg_run = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
+ try:
+ d_stdout, d_stderr = gpg_run.communicate(buf.encode('utf-8'))
+ except OSError, e:
+ log.error("gnupg signing failed, does gpg binary exist? : %s" % e)
+ raise
+
+ return d_stdout
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import re, os
+import string
+import StringIO
+
+import chroot
+import util
+import log
+
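+# packages that must never be erased from the build chroot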
+hold = [
+ 'dev',
+ 'poldek',
+ 'rpm-build',
+ 'pdksh',
+ 'coreutils'
+]
+
+def close_killset(killset):
+ k = killset.keys()
+ if len(k) == 0:
+ return True
+ rx = re.compile(r'^.* marks (?P<name>[^\s]+?)-[^-]+-[^-]+\s.*$')
+ errors = ""
+ for p in k:
+ if p in hold:
+ del killset[p]
+ errors += "cannot remove %s because it's crucial\n" % p
+ else:
+ f = chroot.popen("poldek --noask --test --test --erase %s" % p, user = "root")
+ crucial = 0
+ e = []
+ for l in f.xreadlines():
+ m = rx.search(l)
+ if m:
+ pkg = m.group('name')
+ if pkg in hold:
+ errors += "cannot remove %s because it's required " \
+ "by %s, that is crucial\n" % (p, pkg)
+ crucial = 1
+ e.append(pkg)
+ f.close()
+ if crucial:
+ del killset[p]
+ else:
+ for p in e:
+ killset[p] = 2
+ return errors
+
+def upgrade_from_batch(r, b):
+ f = chroot.popen("rpm --test -F %s 2>&1" % string.join(b.files), user = "root")
+ killset = {}
+ rx = re.compile(r' \(installed\) (?P<name>[^\s]+)-[^-]+-[^-]+$')
+ for l in f.xreadlines():
+ m = rx.search(l)
+ if m: killset[m.group('name')] = 1
+ f.close()
+ if len(killset) != 0:
+ err = close_killset(killset)
+ if err != "":
+ util.append_to(b.logfile, err)
+ log.notice("cannot upgrade rpms")
+ return False
+ k = string.join(killset.keys())
+ if True:
+ b.log_line("upgrade requires removal of %s" % k)
+ res = chroot.run("rpm -e %s" % k, logfile = b.logfile, user = "root")
+ if res != 0:
+ b.log_line("package removal failed")
+ return False
+ else:
+ b.log_line("packages removed sucessfuly")
+ else:
+ b.log_line("upgrade would need removal of %s" % k)
+ return False
+ b.log_line("upgrading packages")
+ logbuf = StringIO.StringIO()
+ res = chroot.run("rpm -Fvh %s" % string.join(b.files), user = "root", logfile = b.logfile)
+ if res != 0:
+ b.log_line("package upgrade failed")
+ logbuf.close()
+ return False
+ logbuf.close()
+ return True
+
+def uninstall(conflicting, b):
+ b.log_line("uninstalling conflicting packages")
+ err = close_killset(conflicting)
+ if err != "":
+ util.append_to(b.logfile, err)
+ b.log_line("error: conflicting packages uninstallation failed")
+ return False
+ else:
+ for k in conflicting.keys():
+ b.log_line("removing %s" % k)
+ res = chroot.run("poldek --noask --erase %s" % k, logfile = b.logfile, user = "root")
+ if res != 0:
+ b.log_line("package %s removal failed" % k)
+ return True
+
+def uninstall_self_conflict(b):
+ b.log_line("checking BuildConflict-ing packages")
+ packagename = b.spec[:-5]
+ tmpdir = os.environ.get('HOME') + "/rpm/BUILD/%s/tmp" % packagename
+ f = chroot.popen("set -e; TMPDIR=%(tmpdir)s rpmbuild -bp --nobuild --short-circuit --define 'prep exit 0' %(rpmdefs)s rpm/packages/%(package)s/%(spec)s 2>&1" % {
+ 'tmpdir': tmpdir,
+ 'rpmdefs' : b.rpmbuild_opts(),
+ 'package' : packagename,
+ 'spec': b.spec,
+ })
+ # java-sun >= 1.5 conflicts with soprano-2.1.67-1.src
+ # java-sun conflicts with soprano-2.1.67-1.src
+ rx = re.compile(r"\s+(?P<name>[\w-]+)\s+.*conflicts with [^\s]+-[^-]+-[^-]+\.src($| .*)")
+ conflicting = {}
+ for l in f.xreadlines():
+ m = rx.search(l)
+ if m:
+ b.log_line("rpmbuild: %s" % l.rstrip())
+ conflicting[m.group('name')] = 1
+ f.close()
+ if len(conflicting) and not uninstall(conflicting, b):
+ return False
+ b.log_line("no BuildConflicts found")
+ return True
+
+def install_br(r, b):
+ def get_missing_br(r, b):
+ # ignore internal rpm dependencies, see lib/rpmns.c for list
+ ignore_br = re.compile(r'^\s*(rpmlib|cpuinfo|getconf|uname|soname|user|group|mounted|diskspace|digest|gnupg|macro|envvar|running|sanitycheck|vcheck|signature|verify|exists|executable|readable|writable)\(.*')
+
+ packagename = b.spec[:-5]
+ tmpdir = os.environ.get('HOME') + "/rpm/BUILD/%s/tmp" % packagename
+ chroot.run("install -m 700 -d %s" % tmpdir)
+ cmd = "set -e; TMPDIR=%(tmpdir)s rpmbuild --nobuild %(rpmdefs)s rpm/packages/%(package)s/%(spec)s 2>&1" % {
+ 'tmpdir': tmpdir,
+ 'rpmdefs' : b.rpmbuild_opts(),
+ 'package' : packagename,
+ 'spec': b.spec,
+ }
+ f = chroot.popen(cmd)
+ rx = re.compile(r"^\s*(?P<name>[^\s]+) .*is needed by")
+ needed = {}
+ b.log_line("checking BR")
+ for l in f.xreadlines():
+ b.log_line("rpm: %s" % l.rstrip())
+ m = rx.search(l)
+ if m and not ignore_br.match(l):
+ needed[m.group('name')] = 1
+ f.close()
+ return needed
+
+ needed = get_missing_br(r, b);
+
+ if len(needed) == 0:
+ b.log_line("no BR needed")
+ return True
+
+ nbr = ""
+ for bre in needed.keys():
+ nbr = nbr + " " + re.escape(bre)
+ br = string.strip(nbr)
+ b.log_line("updating poldek cache...")
+ chroot.run("poldek --up --upa", user = "root", logfile = b.logfile)
+ # check conflicts in BRed packages
+ b.log_line("checking conflicting packages in BRed packages")
+ f = chroot.popen("poldek --test --test --noask --caplookup -Q -v --upgrade %s" % br, user = "root")
+ # phonon-devel-4.3.1-1.i686 conflicts with qt4-phonon-devel-4.5.0-6.i686
+ # jdbc-stdext >= 2.0 is required by installed java-struts-1.3.10-1.noarch
+ # jmx is needed by (installed) java-commons-modeler-2.0-1.noarch
+ rx = re.compile(r".*(conflicts with|is required by|is needed by)( installed| \(installed\)|) (?P<name>[^\s]+)-[^-]+-[^-]+($| .*)")
+ conflicting = {}
+ for l in f.xreadlines():
+ b.log_line("poldek: %s" % l.rstrip())
+ m = rx.search(l)
+ if m: conflicting[m.group('name')] = 1
+ f.close()
+ if len(conflicting) == 0:
+ b.log_line("no conflicts found")
+ else:
+ if not uninstall(conflicting, b):
+ return False
+
+ # recheck BuildRequires since above uninstallation could remove some required deps
+ needed = get_missing_br(r, b);
+
+ if len(needed) == 0:
+ b.log_line("no BR needed")
+ return True
+
+ nbr = ""
+ for bre in needed.keys():
+ nbr = nbr + " " + re.escape(bre)
+ br = string.strip(nbr)
+
+ b.log_line("installing BR: %s" % br)
+ res = chroot.run("poldek --noask --caplookup -Q -v --upgrade %s" % br,
+ user = "root",
+ logfile = b.logfile)
+ if res != 0:
+ b.log_line("error: BR installation failed")
+ return False
+ return True
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import string
+import os
+import time
+
+import path
+import sys
+import log
+import status
+import lock
+import loop
+
+import rpm_builder
+
+from config import config, init_conf
+
+# return the list of binary builders in fair-queue order;
+# the order is determined by the spool/got_lock file, which is
+# also rewritten here to keep it short
+def builders_order():
+ bs = {}
+ bl = []
+ for b in config.binary_builders:
+ bs[b] = 0
+ bl.append(b)
+
+ lck = lock.lock("got-lock")
+ f = open(path.got_lock_file, "r+")
+ line_no = 0
+
+ for l in f.xreadlines():
+ line_no += 1
+ b = string.strip(l)
+ if bs.has_key(b):
+ bs[b] = line_no
+ else:
+ log.alert("found strange lock in got-lock: %s" % b)
+
+ def mycmp(b1, b2):
+ return cmp(bs[b1], bs[b2])
+
+ bl.sort(mycmp)
+
+ f.seek(0)
+ f.truncate(0)
+ for l in bl: f.write(l + "\n")
+ f.close()
+ lck.close()
+
+ return bl
+
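+# fork: the child returns to the caller's loop immediately, while the
+# parent runs the builder for `b` and exits when it is done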
+def run_rpm_builder(b):
+ if os.fork() == 0:
+ return
+ else:
+ rpm_builder.main_for(b)
+ sys.exit(0)
+
+def main():
+ init_conf()
+ for b in builders_order():
+ run_rpm_builder(b)
+        # give the builder some time to acquire the lock
+ time.sleep(1)
+ # wait for children to die out
+ try:
+ while 1: os.wait()
+ except:
+ pass
+
+if __name__ == '__main__':
+ loop.run_loop(main)
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import fcntl
+
+import path
+
+locks_list = []
+
+def lock(n, non_block = 0):
+ f = open(path.lock_dir + '/' + n, "a")
+    # keep a reference, otherwise the file object gets garbage collected and the lock is dropped
+ locks_list.append(f)
+ if non_block:
+ try:
+ fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ except:
+ f.close()
+ return None
+ else:
+ fcntl.flock(f, fcntl.LOCK_EX)
+ return f
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import os
+import sys
+import time
+import syslog
+
+import path
+
+builder = ""
+do_syslog = 0
+
+# string containing the last log entry,
+# kept because the code has no other way to retrieve it
+_last_log = ""
+
+def log(p, s):
+ global _last_log
+ _last_log = s
+
+ if do_syslog:
+ try:
+ syslog.syslog(p, str(s))
+ except TypeError:
+ syslog.syslog(p, repr(s))
+ f = open(path.log_file, "a")
+ f.write("%s [%s]: %s [%s]\n" % (time.asctime(), builder, s, os.path.basename(sys.argv[0])))
+ f.close()
+
+def panic(s):
+ log(syslog.LOG_ALERT, "PANIC: %s" % s)
+ raise Exception, "PANIC: %s" % str(s)
+
+def alert(s):
+ log(syslog.LOG_ALERT, "alert: %s" % s)
+
+def error(s):
+ log(syslog.LOG_ERR, "error: %s" % s)
+
+def warn(s):
+ log(syslog.LOG_WARNING, "warning: %s" % s)
+
+def notice(s):
+ log(syslog.LOG_NOTICE, "notice: %s" % s)
+
+def open_syslog(name, f):
+ global do_syslog
+ do_syslog = 1
+ syslog.openlog(name, syslog.LOG_PID, f)
+
+def last_log():
+ return _last_log
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import os
+import sys
+import time
+
+import wrap
+
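+# repeatedly run fnc in a freshly forked child, at most once every `secs`
+# seconds, giving up after `max` seconds; a child that exits non-zero (or
+# dies on a signal) aborts the whole loop with that status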
+def run_loop(fnc, secs = 5, max = 60):
+ def run():
+ pid = os.fork()
+ if pid == 0:
+ wrap.wrap(fnc)
+ sys.exit(0)
+ else:
+ pid, s = os.waitpid(pid, 0)
+ if os.WIFEXITED(s):
+ s = os.WEXITSTATUS(s)
+ if s != 0:
+ sys.exit(s)
+ else:
+ sys.exit(10)
+
+ start = time.time()
+ while time.time() - start < max:
+ last = time.time()
+ run()
+ took = time.time() - last
+ if took < secs:
+ time.sleep(secs - took)
+
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import time
+import os
+import sys
+import StringIO
+
+from config import config
+import util
+import log
+
+def recode(s):
+ if s.__class__ == ''.__class__:
+ return s.decode('iso-8859-1', 'replace').encode('us-ascii', 'replace')
+ else:
+ return s.encode('us-ascii', 'replace')
+
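+# Typical usage (mirrors the callers elsewhere in this tree; the address
+# is hypothetical):
+#   m = Message()
+#   m.set_headers(to = "user@example.org", subject = "build results")
+#   m.write("some text\n")
+#   m.send()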
+class Message:
+ def __init__(self):
+ self.headers = {}
+ self.body = StringIO.StringIO()
+ self.set_std_headers()
+
+ def set_header(self, n, v):
+ self.headers[n] = v
+
+ def set_headers(self, to = None, cc = None, subject = None):
+ if to != None:
+ self.set_header("To", to)
+ if cc != None:
+ self.set_header("Cc", cc)
+ if subject != None:
+ self.set_header("Subject", subject)
+
+ def write_line(self, l):
+ self.body.write(recode("%s\n" % l))
+
+ def write(self, s):
+ self.body.write(recode(s))
+
+ def append_log(self, log):
+ s = os.stat(log)
+ if s.st_size > 50000:
+ # just head and tail
+ f = open(log)
+ line_cnt = 0
+ for l in f.xreadlines():
+ line_cnt += 1
+ f.seek(0)
+ line = 0
+ for l in f.xreadlines():
+ if line < 100 or line > line_cnt - 100:
+ self.body.write(recode(l))
+ if line == line_cnt - 100:
+ self.body.write("\n\n[...]\n\n")
+ line += 1
+ else:
+ util.sendfile(open(log), self.body)
+
+ def set_std_headers(self):
+ self.headers["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())
+ self.headers["Message-ID"] = "<pld-builder.%f.%d@%s>" \
+ % (time.time(), os.getpid(), os.uname()[1])
+ self.headers["From"] = "PLD %s builder <%s>" \
+ % (config.builder, config.email)
+ self.headers["X-PLD-Builder"] = config.builder
+
+ def write_to(self, f):
+ for k, v in self.headers.items():
+ f.write("%s: %s\n" % (k, v))
+ f.write("\n")
+ self.body.seek(0)
+ util.sendfile(self.body, f)
+
+ def send(self):
+ if not os.path.exists("/usr/lib/sendmail"):
+ # TODO: dump to file?
+ log.alert("/usr/lib/sendmail doesn't exist: Can't send email")
+ return False
+ send_sendmail = "/usr/lib/sendmail -i -t -f %s" % config.admin_email
+ f = os.popen(send_sendmail, "w")
+ try:
+ self.write_to(f)
+ except IOError, e:
+ log.alert("sending email message failed: %s" % e)
+ f.close()
+ return False
+ return f.close()
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import os
+import sys
+import time
+import datetime
+
+from config import config, init_conf
+import util
+import chroot
+import ftp
+import path
+
+def clean_dir(path, max):
+ curtime=time.time()
+ for i in os.listdir(path):
+ if curtime - os.path.getmtime(path+'/'+i) > max:
+ if os.path.isdir(path+'/'+i):
+ util.clean_tmp(path+'/'+i)
+ else:
+ os.unlink(path+'/'+i)
+
+def send_rpmqa():
+ tmp = path.build_dir + '/' + util.uuid() + '/'
+ os.mkdir(tmp)
+ log = tmp + config.rpmqa_filename
+ open(log, 'a').write("Query done at: %s\n" % datetime.datetime.now().isoformat(' '))
+ chroot.run("rpm -qa|sort", logfile=log)
+ os.chmod(log,0644)
+ ftp.init(rpmqa=True)
+ ftp.add(log)
+ ftp.flush()
+ os.unlink(log)
+ os.rmdir(tmp)
+
+def handle_src():
+ send_rpmqa()
+ clean_dir(path.www_dir+'/srpms', 2592000) # a month
+
+def handle_bin():
+ send_rpmqa()
+ f=chroot.popen("""ls -l --time-style +%s /spools/ready""", 'root')
+ rmpkgs=[]
+ curtime=time.time()
+ for i in f:
+ if i[-4:-1]!='rpm':
+ continue
+ tmp=i.split()
+ mtime=int(tmp[5])
+ pkgname=tmp[6]
+ if curtime - mtime > config.max_keep_time:
+ rmpkgs.append(pkgname)
+
+ i=0
+ while rmpkgs[i:i+1000]:
+ chroot.run("cd /spools/ready; rm -f %s" % ' '.join(rmpkgs[i:i+1000]), 'root')
+ i=i+1000
+ f.close()
+ chroot.run("poldek --mo=nodiff --mkidxz -s /spools/ready")
+
+if __name__ == '__main__':
+ init_conf()
+ bb=config.binary_builders[:]
+ clean_dir(path.spool_dir+'/builds', 2592000) # a month
+ if config.src_builder:
+ try:
+ init_conf(config.src_builder)
+ except:
+ pass
+ else:
+ handle_src()
+ sys.exit(0)
+ for b in bb:
+ try:
+ init_conf(b)
+ except:
+ continue
+ else:
+ handle_bin()
+
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import StringIO
+
+import mailer
+import gpg
+import util
+import notifyq
+from config import config
+
+class Notifier:
+ def __init__(self, g):
+ self.xml = StringIO.StringIO()
+ self.xml.write("<notification group-id='%s' builder='%s'>\n" % \
+ (g.id, config.builder))
+
+ def send(self, r):
+ sio = StringIO.StringIO()
+ self.xml.write("</notification>\n")
+ self.xml.seek(0)
+ sio.write(gpg.sign(self.xml.read()))
+ self.xml = None
+ sio.seek(0)
+ notifyq.init(r)
+ notifyq.add(sio)
+ notifyq.flush()
+
+ def add_batch(self, b, s):
+ self.xml.write(" <batch id='%s' status='%s' />\n" % (b.b_id, s))
+
+n = None
+
+def begin(group):
+ global n
+ n = Notifier(group)
+
+def add_batch(batch, status):
+ n.add_batch(batch, status)
+
+def send(r):
+ n.send(r)
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import path
+import os
+import shutil
+import time
+
+from config import config
+import util
+
+class Notify_Queue:
+ def __init__(self):
+        self.queue = None
+        self.some_failed = 0
+        self.status = ""  # read/reset by status() and clear_status() below
+
+ def init(self, g=None):
+ self.queue = []
+ self.requester_email = g.requester_email
+ self.notify_url = config.notify_url
+
+ def add(self, file):
+ id = util.uuid()
+ f = open(path.notify_queue_dir + '/' + id, 'w')
+ f.write(file.read())
+ f.close()
+ self.queue.append({'id': id})
+
+ def flush(self):
+ def desc(l):
+ return """Target: %s
+Id: %s
+Builder: %s
+Time: %d
+Requester: %s
+END
+""" % (self.notify_url, l['id'], config.builder, time.time(), self.requester_email)
+
+ for l in self.queue:
+ f = open(path.notify_queue_dir + '/' + l['id'] + ".desc", "w")
+ f.write(desc(l))
+ f.close()
+
+ def kill(self):
+ for l in self.queue:
+            os.unlink(path.notify_queue_dir + '/' + l['id'])
+
+queue = Notify_Queue()
+
+def add(notify):
+ queue.add(notify)
+
+def flush():
+ queue.flush()
+
+def kill():
+ queue.kill()
+
+def init(r):
+ queue.init(r)
+
+def status():
+ return queue.status
+
+def clear_status():
+ queue.status = ""
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import os
+
+root_dir = os.getenv('BUILDERPATH')
+if root_dir is None:
+ root_dir = os.path.expanduser('~/pld-builder.new')
+conf_dir = root_dir + "/config"
+spool_dir = root_dir + "/spool"
+lock_dir = root_dir + "/lock"
+www_dir = root_dir + "/www"
+
+acl_conf = conf_dir + "/acl.conf"
+builder_conf = conf_dir + "/builder.conf"
+rsync_password_file = conf_dir + "/rsync-passwords"
+
+# spool/
+queue_file = spool_dir + "/queue"
+req_queue_file = spool_dir + "/req_queue"
+processed_ids_file = spool_dir + "/processed_ids"
+notify_queue_dir = spool_dir + "/notify"
+buildlogs_queue_dir = spool_dir + "/buildlogs"
+ftp_queue_dir = spool_dir + "/ftp"
+build_dir = spool_dir + "/builds"
+last_req_no_file = spool_dir + "/last_req_no"
+got_lock_file = spool_dir + "/got_lock"
+log_file = spool_dir + "/log"
+
+# www/
+srpms_dir = www_dir + "/srpms"
+req_queue_signed_file = www_dir + "/queue.gz"
+max_req_no_file = www_dir + "/max_req_no"
+queue_stats_file = www_dir + "/queue.txt"
+queue_html_stats_file = www_dir + "/queue.html"
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import select
+import os
+import StringIO
+
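+# feed the whole contents of the file-like `buf_` to `infd` while reading
+# `outfd` at the same time, multiplexing with select() so neither pipe end
+# blocks; returns the collected output as a StringIO. Minimal usage sketch
+# (the subprocess is hypothetical):
+#   p = subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+#   out = rw_pipe(StringIO.StringIO("data"), p.stdin, p.stdout)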
+def rw_pipe(buf_, infd, outfd):
+ buf = StringIO.StringIO()
+ buf.write(buf_.read())
+ ret = StringIO.StringIO()
+ pos = 0
+ rd_fin = 0
+ wr_fin = 0
+ buf.seek(pos)
+ while not (rd_fin and wr_fin):
+ if wr_fin:
+ o = []
+ else:
+ o = [infd]
+ if rd_fin:
+ i = []
+ else:
+ i = [outfd]
+ i, o, e = select.select(i, o, [])
+ if i != []:
+ s = os.read(outfd.fileno(), 1000)
+ if s == "":
+ rd_fin = 1
+ ret.write(s)
+ if o != []:
+ buf.seek(pos)
+ s = buf.read(1000)
+ if s == "":
+ infd.close()
+ wr_fin = 1
+ else:
+ cnt = os.write(infd.fileno(), s)
+ pos += cnt
+ outfd.close()
+ ret.seek(0)
+ return ret
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import re
+import types
+import string
+import xreadlines
+
+from chroot import *
+from util import *
+
+
+def get_poldek_requires():
+ # precompile regexps
+ name_rx = re.compile(r"\d+\. ([^\s]+)-[^-]+-[^-]+\n")
+ req_rx = re.compile(r" req .* --> (.*)\n")
+ pkg_name_rx = re.compile(r"([^\s]+)-[^-]+-[^-]+")
+
+ # todo: if a and b are sets, then use sets module
+ # and intersection method on set object
+ def intersect(a, b):
+ r = []
+ for x in a:
+ if x in b: r.append(x)
+ return r
+
+ # add given req-list to cur_pkg_reqs
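+    # (a list element inside cur_pkg_reqs means "any one of these
+    # satisfies the dependency"; e.g. merging ['a','b'] and then
+    # ['b','c'] narrows the alternatives down to just 'b')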
+ def add_req(reqs):
+ if len(reqs) == 1:
+ if reqs[0] not in cur_pkg_reqs:
+ cur_pkg_reqs.append(reqs[0])
+ else:
+ did = 0
+ for x in cur_pkg_reqs:
+ if type(x) is types.ListType:
+ i = intersect(x, reqs)
+ if len(i) == 0:
+ continue
+ did = 1
+ idx = cur_pkg_reqs.index(x)
+ if len(i) == 1:
+ if i[0] in cur_pkg_reqs:
+ del cur_pkg_reqs[idx]
+ else:
+ cur_pkg_reqs[idx] = i[0]
+ else:
+ cur_pkg_reqs[idx] = i
+ else:
+ if x in reqs:
+ return
+ if not did:
+ cur_pkg_reqs.append(reqs)
+
+ pkg_reqs = {}
+ cur_pkg_reqs = None
+ cur_pkg = None
+
+ f = chr_popen("poldek -v -v --verify --unique-pkg-names")
+ for l in xreadlines.xreadlines(f):
+ m = name_rx.match(l)
+ if m:
+ if cur_pkg:
+ pkg_reqs[cur_pkg] = cur_pkg_reqs
+            cur_pkg = m.group(1)  # the package name, not the groups tuple
+ if pkg_reqs.has_key(cur_pkg):
+ cur_pkg = None
+ cur_pkg_reqs = None
+ else:
+ cur_pkg_reqs = []
+ continue
+ m = req_rx.match(l)
+ if m:
+ reqs = []
+ for x in string.split(m.group(1)):
+ if x in ["RPMLIB_CAP", "NOT", "FOUND", "UNMATCHED"]: continue
+ m = pkg_name_rx.match(x)
+ if m:
+ reqs.append(m.group(1))
+ else:
+ msg("poldek_reqs: bad pkg name: %s\n" % x)
+ if len(reqs) != 0: add_req(reqs)
+
+ f.close()
+
+ if cur_pkg:
+ pkg_reqs[cur_pkg] = cur_pkg_reqs
+
+ return pkg_reqs
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import string
+import path
+
+import ftp
+import stopwatch
+import mailer
+from config import config
+
+def unpackaged_files(b):
+ msg = "warning: Installed (but unpackaged) file(s) found:\n"
+ f = open(b.logfile)
+ copy_mode = 0
+ out = []
+ for l in f.xreadlines():
+ if l == msg:
+ copy_mode = 1
+ out.append(l)
+ elif copy_mode:
+ if l[0] != ' ':
+ copy_mode = 0
+ else:
+ out.append(l)
+ return out
+
+def add_pld_builder_info(b):
+ l = open(b.logfile, "a")
+ l.write("Begin-PLD-Builder-Info\n")
+ l.write("Build-Time: %s\n\n" % b.build_time)
+ st = ftp.status()
+ if st != "":
+ l.write("Files queued for ftp:\n%s\n" % st)
+ ftp.clear_status()
+ l.writelines(unpackaged_files(b))
+ l.write("End-PLD-Builder-Info\n")
+
+def info_from_log(b, target):
+ beg = "Begin-PLD-Builder-Info\n"
+ end = "End-PLD-Builder-Info\n"
+ f = open(b.logfile)
+ copy_mode = 0
+ need_header = 1
+ for l in f.xreadlines():
+ if l == beg:
+ if need_header:
+ need_header = 0
+ target.write("\n--- %s:%s:\n" % (b.spec, b.branch))
+ copy_mode = 1
+ elif copy_mode:
+ if l == end:
+ copy_mode = 0
+ else:
+ target.write(l)
+
+def send_report(r, is_src = False):
+ s_failed = ' '.join([b.spec for b in r.batches if b.build_failed])
+ s_ok = ' '.join([b.spec for b in r.batches if not b.build_failed])
+ upgrades_status = [b.upgraded for b in r.batches]
+
+ if s_failed: s_failed = "ERRORS: %s" % s_failed
+ if s_ok: s_ok = "OK: %s" % s_ok
+
+ subject = ''
+
+ if 'test-build' in r.flags:
+ subject = 'TEST build '
+
+ if not is_src and 'upgrade' in r.flags and False in upgrades_status:
+ subject = 'upgrade failed '
+
+ subject += ' '.join((s_failed, s_ok)).strip()
+
+ m = mailer.Message()
+ m.set_headers(to = r.requester_email,
+ cc = config.builder_list,
+ subject = subject[0:100])
+ if is_src:
+ m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
+ else:
+ m.set_header("References", "<%s@pld.src.builder>" % r.id)
+ m.set_header("In-Reply-To", "<%s@pld.src.builder>" % r.id)
+
+ for b in r.batches:
+ if b.build_failed and b.logfile == None:
+ info = b.skip_reason
+ elif b.build_failed:
+ info = "FAILED"
+ else:
+ info = "OK"
+ m.write("%s (%s): %s\n" % (b.spec, b.branch, info))
+
+ for b in r.batches:
+ if b.logfile != None:
+ info_from_log(b, m)
+
+ for b in r.batches:
+ if (b.is_command () or b.build_failed) and b.logfile != None:
+ m.write("\n\n*** buildlog for %s\n" % b.spec)
+ m.append_log(b.logfile)
+ m.write("\n\n")
+
+ m.send()
+
+def send_cia_report(r, is_src = False):
+
+ subject = 'DeliverXML'
+
+ m = mailer.Message()
+ if (len(config.bot_email) == 0):
+ return
+
+ m.set_headers(to = config.bot_email,
+ subject = subject)
+ m.set_header("Message-ID", "<%s@pld.src.builder>" % r.id)
+ m.set_header("X-mailer", "$Id$")
+ m.set_header("X-builder", "PLD")
+
+ # get header of xml message from file
+ f = open(path.root_dir + '/PLD_Builder/cia-head.xml')
+ m.write(f.read())
+ f.close()
+
+ # write in iteration list and status of all processed files
+ for b in r.batches:
+ m.write('<package name="%s" arch="%s">\n' % (b.spec, b.branch))
+ if b.build_failed:
+ m.write('<failed/>\n')
+ else:
+ m.write('<success/>\n')
+ m.write('</package>\n')
+
+ # get footer of xml message from file
+ f = open(path.root_dir + '/PLD_Builder/cia-foot.xml')
+ m.write(f.read())
+ f.close()
+
+ # send the e-mail
+ m.send()
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+from xml.dom.minidom import *
+import string
+import time
+import xml.sax.saxutils
+import fnmatch
+import os
+import urllib
+import cgi
+
+import util
+import log
+from acl import acl
+from config import config
+
+__all__ = ['parse_request', 'parse_requests']
+
+def text(e):
+ res = ""
+ for n in e.childNodes:
+ if n.nodeType != Element.TEXT_NODE:
+ log.panic("xml: text expected in <%s>, got %d" % (e.nodeName, n.nodeType))
+ res += n.nodeValue
+ return res
+
+def attr(e, a, default = None):
+ try:
+ return e.attributes[a].value
+ except:
+ if default != None:
+ return default
+ raise
+
+def escape(s):
+ return xml.sax.saxutils.escape(s)
+
+# return timestamp with timezone information
+# so we could parse it in javascript
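+# e.g. tzdate(0) on a CET (+0100, no DST) system gives
+# "Thu Jan 01 1970 01:00:00 +0100"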
+def tzdate(t):
+    # strftime %z is unofficial and unreliable, so compute the numeric offset ourselves
+# date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
+ date = time.strftime("%a %b %d %Y %H:%M:%S", time.localtime(t))
+    # NOTE: altzone/timezone reflect the CURRENT timezone, not the one in effect at "t"
+    # NOTE: when DST is off, timezone gets it right while altzone does not
+ if time.daylight:
+ tzoffset = time.altzone
+ else:
+ tzoffset = time.timezone
+ tz = '%+05d' % (-tzoffset / 3600 * 100)
+ return date + ' ' + tz
+
+def is_blank(e):
+ return e.nodeType == Element.TEXT_NODE and string.strip(e.nodeValue) == ""
+
+class Group:
+ def __init__(self, e):
+ self.batches = []
+ self.kind = 'group'
+ self.id = attr(e, "id")
+ self.no = int(attr(e, "no"))
+ self.priority = 2
+ self.time = time.time()
+ self.requester = ""
+ self.max_jobs = 0
+ self.requester_email = ""
+ self.flags = string.split(attr(e, "flags", ""))
+ for c in e.childNodes:
+ if is_blank(c): continue
+
+ if c.nodeType != Element.ELEMENT_NODE:
+ log.panic("xml: evil group child %d" % c.nodeType)
+ if c.nodeName == "batch":
+ self.batches.append(Batch(c))
+ elif c.nodeName == "requester":
+ self.requester = text(c)
+ self.requester_email = attr(c, "email", "")
+ elif c.nodeName == "priority":
+ self.priority = int(text(c))
+ elif c.nodeName == "time":
+ self.time = int(text(c))
+ elif c.nodeName == "maxjobs":
+ self.max_jobs = int(text(c))
+ else:
+ log.panic("xml: evil group child (%s)" % c.nodeName)
+ # note that we also check that group is sorted WRT deps
+ m = {}
+ for b in self.batches:
+ deps = []
+ m[b.b_id] = b
+ for dep in b.depends_on:
+ if m.has_key(dep):
+ # avoid self-deps
+ if id(m[dep]) != id(b):
+ deps.append(m[dep])
+ else:
+ log.panic("xml: dependency not found in group")
+ b.depends_on = deps
+ if self.requester_email == "" and self.requester != "":
+ self.requester_email = acl.user(self.requester).mail_to()
+
+ def dump(self, f):
+ f.write("group: %d (id=%s pri=%d)\n" % (self.no, self.id, self.priority))
+ f.write(" from: %s\n" % self.requester)
+ f.write(" flags: %s\n" % string.join(self.flags))
+ f.write(" time: %s\n" % time.asctime(time.localtime(self.time)))
+ for b in self.batches:
+ b.dump(f)
+ f.write("\n")
+
+ def dump_html(self, f):
+ f.write(
+ "<div id=\"%(no)d\" class=\"%(flags)s\">\n"
+ "<a href=\"#%(no)d\">%(no)d</a>. <span id=\"tz\">%(time)s</span> from <b>%(requester)s</b> "
+ "<small>%(id)s, prio=%(priority)d, jobs=%(max_jobs)d, %(flags)s</small>\n"
+ % {
+ 'no': self.no,
+ 'id': '<a href="srpms/%(id)s">%(id)s</a>' % {'id': self.id},
+ 'time': escape(tzdate(self.time)),
+ 'requester': escape(self.requester),
+ 'priority': self.priority,
+ 'max_jobs': self.max_jobs,
+ 'flags': string.join(self.flags)
+ })
+ f.write("<ul>\n")
+ for b in self.batches:
+ b.dump_html(f, self.id)
+ f.write("</ul>\n")
+ f.write("</div>\n")
+
+ def write_to(self, f):
+ f.write("""
+ <group id="%s" no="%d" flags="%s">
+ <requester email='%s'>%s</requester>
+ <time>%d</time>
+ <priority>%d</priority>
+ <maxjobs>%d</maxjobs>\n""" % (self.id, self.no, string.join(self.flags),
+ escape(self.requester_email), escape(self.requester),
+ self.time, self.priority, self.max_jobs))
+ for b in self.batches:
+ b.write_to(f)
+ f.write(" </group>\n\n")
+
+ def is_done(self):
+ ok = 1
+ for b in self.batches:
+ if not b.is_done():
+ ok = 0
+ return ok
+
+class Batch:
+ def __init__(self, e):
+ self.bconds_with = []
+ self.bconds_without = []
+ self.builders = []
+ self.builders_status = {}
+ self.builders_status_time = {}
+ self.builders_status_buildtime = {}
+ self.kernel = ""
+ self.target = []
+ self.branch = ""
+ self.src_rpm = ""
+ self.info = ""
+ self.spec = ""
+ self.command = ""
+ self.command_flags = []
+ self.skip = []
+ self.gb_id = ""
+ self.b_id = attr(e, "id")
+ self.depends_on = string.split(attr(e, "depends-on"))
+ self.upgraded = True
+ for c in e.childNodes:
+ if is_blank(c): continue
+
+ if c.nodeType != Element.ELEMENT_NODE:
+ log.panic("xml: evil batch child %d" % c.nodeType)
+ if c.nodeName == "src-rpm":
+ self.src_rpm = text(c)
+ elif c.nodeName == "spec":
+                # normalize specname; it is used as the buildlog name and we
+                # don't want to be exposed to directory traversal attacks
+ self.spec = text(c).split('/')[-1]
+ elif c.nodeName == "command":
+ self.spec = "COMMAND"
+ self.command = text(c).strip()
+ self.command_flags = string.split(attr(c, "flags", ""))
+ elif c.nodeName == "info":
+ self.info = text(c)
+ elif c.nodeName == "kernel":
+ self.kernel = text(c)
+ elif c.nodeName == "target":
+ self.target.append(text(c))
+ elif c.nodeName == "skip":
+ self.skip.append(text(c))
+ elif c.nodeName == "branch":
+ self.branch = text(c)
+ elif c.nodeName == "builder":
+ key = text(c)
+ self.builders.append(key)
+ self.builders_status[key] = attr(c, "status", "?")
+ self.builders_status_time[key] = attr(c, "time", "0")
+ self.builders_status_buildtime[key] = "0" #attr(c, "buildtime", "0")
+ elif c.nodeName == "with":
+ self.bconds_with.append(text(c))
+ elif c.nodeName == "without":
+ self.bconds_without.append(text(c))
+ else:
+ log.panic("xml: evil batch child (%s)" % c.nodeName)
+
+ def is_done(self):
+ ok = 1
+ for b in self.builders:
+ s = self.builders_status[b]
+ if not s.startswith("OK") and not s.startswith("SKIP") and not s.startswith("UNSUPP") and not s.startswith("FAIL"):
+ ok = 0
+ return ok
+
+ def dump(self, f):
+ f.write(" batch: %s/%s\n" % (self.src_rpm, self.spec))
+ f.write(" info: %s\n" % self.info)
+ f.write(" kernel: %s\n" % self.kernel)
+ f.write(" target: %s\n" % self.target_string())
+ f.write(" branch: %s\n" % self.branch)
+ f.write(" bconds: %s\n" % self.bconds_string())
+ builders = []
+ for b in self.builders:
+ builders.append("%s:%s" % (b, self.builders_status[b]))
+ f.write(" builders: %s\n" % string.join(builders))
+
+ def is_command(self):
+ return self.command != ""
+
+ def dump_html(self, f, rid):
+ f.write("<li>\n")
+ if self.is_command():
+ desc = "SH: <pre>%s</pre> flags: [%s]" % (self.command, ' '.join(self.command_flags))
+ else:
+ package_url = "http://git.pld-linux.org/gitweb.cgi?p=packages/%(package)s.git;f=%(spec)s;h=%(branch)s;a=shortlog" % {
+ 'spec': self.spec,
+ 'branch': self.branch,
+ 'package': self.spec[:-5],
+ }
+ desc = "%(src_rpm)s (<a href=\"%(package_url)s\">%(spec)s -r %(branch)s</a>%(bconds)s)" % {
+ 'src_rpm': self.src_rpm,
+ 'spec': self.spec,
+ 'branch': self.branch,
+ 'bconds': self.bconds_string() + self.kernel_string() + self.target_string(),
+ 'package_url': package_url,
+ }
+ f.write("%s <small>[" % desc)
+ builders = []
+ for b in self.builders:
+ s = self.builders_status[b]
+ if s.startswith("OK"):
+ c = "green"
+ elif s.startswith("FAIL"):
+ c = "red"
+ elif s.startswith("SKIP"):
+ c = "blue"
+ elif s.startswith("UNSUPP"):
+ c = "fuchsia"
+ else:
+ c = "black"
+ link_pre = ""
+ link_post = ""
+ if (s.startswith("OK") or s.startswith("SKIP") or s.startswith("UNSUPP") or s.startswith("FAIL")) and len(self.spec) > 5:
+ if self.is_command():
+ bl_name = "command"
+ else:
+ bl_name = self.spec[:len(self.spec)-5]
+ lin_ar = b.replace('noauto-','')
+ path = "/%s/%s/%s,%s.bz2" % (lin_ar.replace('-','/'), s, bl_name, rid)
+ is_ok = 0
+ if s.startswith("OK"):
+ is_ok = 1
+ bld = lin_ar.split('-')
+ tree_name = '-'.join(bld[:-1])
+ tree_arch = '-'.join(bld[-1:])
+ link_pre = "<a href=\"http://buildlogs.pld-linux.org/index.php?dist=%s&arch=%s&ok=%d&name=%s&id=%s&action=tail\">" \
+ % (urllib.quote(tree_name), urllib.quote(tree_arch), is_ok, urllib.quote(bl_name), urllib.quote(rid))
+ link_post = "</a>"
+
+ def ftime(s):
+ t = float(s)
+ if t > 0:
+ return time.asctime(time.localtime(t))
+ else:
+ return 'N/A'
+
+ tooltip = "last update: %(time)s\nbuild time: %(buildtime)s" % {
+ 'time' : ftime(self.builders_status_time[b]),
+ 'buildtime' : ftime(self.builders_status_buildtime[b]),
+ }
+ builders.append(link_pre +
+ "<font color='%(color)s'><b title=\"%(tooltip)s\">%(builder)s:%(status)s</b></font>" % {
+ 'color' : c,
+ 'builder' : b,
+ 'status' : s,
+ 'tooltip' : cgi.escape(tooltip, True),
+ }
+ + link_post)
+ f.write("%s]</small></li>\n" % string.join(builders))
+
+ def rpmbuild_opts(self):
+ """
+ return all rpmbuild options related to this build
+ """
+ bconds = self.bconds_string() + self.kernel_string() + self.target_string()
+ rpmdefs = \
+ "--define '_topdir %(echo $HOME/rpm)' " \
+ "--define '_specdir %{_topdir}/packages/%{name}' " \
+ "--define '_sourcedir %{_specdir}' " \
+ "--define '_builddir %{_topdir}/BUILD/%{name}' "
+ return rpmdefs + bconds
+
+ def kernel_string(self):
+ r = ""
+ if self.kernel != "":
+ r = " --define 'alt_kernel " + self.kernel + "'"
+ return r
+
+ def target_string(self):
+ if len(self.target) > 0:
+ return " --target " + ",".join(self.target)
+ else:
+ return ""
+
+ def bconds_string(self):
+ r = ""
+ for b in self.bconds_with:
+ r = r + " --with " + b
+ for b in self.bconds_without:
+ r = r + " --without " + b
+ return r
+
+ def default_target(self, arch):
+ self.target.append("%s-pld-linux" % arch)
+
+ def write_to(self, f):
+ f.write("""
+ <batch id='%s' depends-on='%s'>
+ <src-rpm>%s</src-rpm>
+ <command flags="%s">%s</command>
+ <spec>%s</spec>
+ <branch>%s</branch>
+ <info>%s</info>\n""" % (self.b_id,
+ string.join(map(lambda (b): b.b_id, self.depends_on)),
+ escape(self.src_rpm),
+ escape(' '.join(self.command_flags)), escape(self.command),
+ escape(self.spec), escape(self.branch), escape(self.info)))
+ if self.kernel != "":
+ f.write(" <kernel>%s</kernel>\n" % escape(self.kernel))
+ for b in self.bconds_with:
+ f.write(" <with>%s</with>\n" % escape(b))
+ for b in self.target:
+ f.write(" <target>%s</target>\n" % escape(b))
+ for b in self.bconds_without:
+ f.write(" <without>%s</without>\n" % escape(b))
+ for b in self.builders:
+ if self.builders_status_buildtime.has_key(b):
+ t = self.builders_status_buildtime[b]
+ else:
+ t = "0"
+ f.write(" <builder status='%s' time='%s' buildtime='%s'>%s</builder>\n" % \
+ (escape(self.builders_status[b]), self.builders_status_time[b], t, escape(b)))
+ f.write(" </batch>\n")
+
+ def log_line(self, l):
+ log.notice(l)
+ if self.logfile != None:
+ util.append_to(self.logfile, l)
+
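+    # expand glob patterns in batch.builders against the list of known
+    # builders; patterns matching no known builder are kept verbatim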
+ def expand_builders(batch, all_builders):
+ all = []
+ for bld in batch.builders:
+ res = []
+ for my_bld in all_builders:
+ if fnmatch.fnmatch(my_bld, bld):
+ res.append(my_bld)
+ if res != []:
+ all.extend(res)
+ else:
+ all.append(bld)
+ batch.builders = all
+
+class Notification:
+ def __init__(self, e):
+ self.batches = []
+ self.kind = 'notification'
+ self.group_id = attr(e, "group-id")
+ self.builder = attr(e, "builder")
+ self.batches = {}
+ self.batches_buildtime = {}
+ for c in e.childNodes:
+ if is_blank(c): continue
+ if c.nodeType != Element.ELEMENT_NODE:
+ log.panic("xml: evil notification child %d" % c.nodeType)
+ if c.nodeName == "batch":
+ id = attr(c, "id")
+ status = attr(c, "status")
+ buildtime = attr(c, "buildtime", "0")
+ if not status.startswith("OK") and not status.startswith("SKIP") and not status.startswith("UNSUPP") and not status.startswith("FAIL"):
+ log.panic("xml notification: bad status: %s" % status)
+ self.batches[id] = status
+ self.batches_buildtime[id] = buildtime
+ else:
+ log.panic("xml: evil notification child (%s)" % c.nodeName)
+
+ def apply_to(self, q):
+ for r in q.requests:
+ if r.kind == "group":
+ for b in r.batches:
+ if self.batches.has_key(b.b_id):
+ b.builders_status[self.builder] = self.batches[b.b_id]
+ b.builders_status_time[self.builder] = time.time()
+ b.builders_status_buildtime[self.builder] = "0" #self.batches_buildtime[b.b_id]
+
+def build_request(e):
+ if e.nodeType != Element.ELEMENT_NODE:
+ log.panic("xml: evil request element")
+ if e.nodeName == "group":
+ return Group(e)
+ elif e.nodeName == "notification":
+ return Notification(e)
+ elif e.nodeName == "command":
+ # FIXME
+ return Command(e)
+ else:
+ log.panic("xml: evil request [%s]" % e.nodeName)
+
+def parse_request(f):
+ d = parseString(f)
+ return build_request(d.documentElement)
+
+def parse_requests(f):
+ d = parseString(f)
+ res = []
+ for r in d.documentElement.childNodes:
+ if is_blank(r): continue
+ res.append(build_request(r))
+ return res
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import string
+import signal
+import os
+import urllib
+import urllib2
+import StringIO
+import sys
+import gzip
+
+import path
+import log
+import status
+import lock
+import util
+import gpg
+import request
+import loop
+import socket
+from acl import acl
+from bqueue import B_Queue
+from config import config, init_conf
+
+last_count = 0
+
+def alarmalarm(signum, frame):
+ raise IOError, 'TCP connection hung'
+
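+# compare the locally stored request counter with the max_req_no
+# published by the src builder; non-zero means there is something new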
+def has_new(control_url):
+ global last_count
+ cnt_f = open(path.last_req_no_file)
+ try:
+ last_count = int(string.strip(cnt_f.readline()))
+ except ValueError, e:
+ last_count = 0
+
+ cnt_f.close()
+ f = None
+ socket.setdefaulttimeout(240)
+ signal.signal(signal.SIGALRM, alarmalarm)
+ signal.alarm(300)
+ try:
+ headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' }
+ req = urllib2.Request(url=control_url + "/max_req_no", headers=headers)
+ f = urllib2.urlopen(req)
+ count = int(string.strip(f.readline()))
+ signal.alarm(0)
+ except Exception, e:
+ signal.alarm(0)
+ log.error("can't fetch %s: %s" % (control_url + "/max_req_no", e))
+ sys.exit(1)
+ res = 0
+ if count != last_count:
+ res = 1
+ f.close()
+ return res
+
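+# download and gunzip queue.gz, verify its GPG signature and the
+# signer's sign_queue privilege, then parse the requests it contains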
+def fetch_queue(control_url):
+ signal.signal(signal.SIGALRM, alarmalarm)
+ socket.setdefaulttimeout(240)
+ signal.alarm(300)
+ try:
+ headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' }
+ req = urllib2.Request(url=control_url + "/queue.gz", headers=headers)
+ f = urllib2.urlopen(req)
+ signal.alarm(0)
+ except Exception, e:
+ signal.alarm(0)
+ log.error("can't fetch %s: %s" % (control_url + "/queue.gz", e))
+ sys.exit(1)
+ sio = StringIO.StringIO()
+ util.sendfile(f, sio)
+ f.close()
+ sio.seek(0)
+ f = gzip.GzipFile(fileobj = sio)
+ (signers, body) = gpg.verify_sig(f.read())
+ u = acl.user_by_email(signers)
+ if u == None:
+ log.alert("queue.gz not signed with signature of valid user: %s" % signers)
+ sys.exit(1)
+ if not u.can_do("sign_queue", "all"):
+ log.alert("user %s is not allowed to sign my queue" % u.login)
+ sys.exit(1)
+ return request.parse_requests(body)
+
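+# append the group requests that involve the given binary builder to
+# that builder's local queue file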
+def handle_reqs(builder, reqs):
+ qpath = path.queue_file + "-" + builder
+ if not os.access(qpath, os.F_OK):
+ util.append_to(qpath, "<queue/>\n")
+ q = B_Queue(qpath)
+ q.lock(0)
+ q.read()
+ for r in reqs:
+ if r.kind != 'group':
+ raise Exception, 'handle_reqs: fatal: huh? %s' % r.kind
+ need_it = 0
+ for b in r.batches:
+ if builder in b.builders:
+ need_it = 1
+ if need_it:
+ log.notice("queued %s (%d) for %s" % (r.id, r.no, builder))
+ q.add(r)
+ q.write()
+ q.unlock()
+
+def main():
+ lck = lock.lock("request_fetcher", non_block = True)
+ if lck == None:
+ sys.exit(1)
+ init_conf()
+ acl.try_reload()
+
+ status.push("fetching requests")
+ if has_new(config.control_url):
+ q = fetch_queue(config.control_url)
+ max_no = 0
+ q_new = []
+ for r in q:
+ if r.no > max_no:
+ max_no = r.no
+ if r.no > last_count:
+ q_new.append(r)
+ for b in config.binary_builders:
+ handle_reqs(b, q_new)
+ f = open(path.last_req_no_file, "w")
+ f.write("%d\n" % max_no)
+ f.close()
+ status.pop()
+ lck.close()
+
+if __name__ == '__main__':
+    # an http connection is established (and a few bytes transferred
+    # through it) every $secs seconds.
+ loop.run_loop(main, secs = 10)
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import email
+import string
+import time
+import os
+import StringIO
+import sys
+import fnmatch
+
+import gpg
+import request
+import log
+import path
+import util
+import wrap
+import status
+from acl import acl
+from lock import lock
+from bqueue import B_Queue
+from config import config, init_conf
+
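+# replay guard: remember processed request ids and reject duplicates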
+def check_double_id(id):
+ id_nl = id + "\n"
+
+ ids = open(path.processed_ids_file)
+ for i in ids.xreadlines():
+ if i == id_nl:
+ # FIXME: security email here?
+ log.alert("request %s already processed" % id)
+ return 1
+ ids.close()
+
+ ids = open(path.processed_ids_file, "a")
+ ids.write(id_nl)
+ ids.close()
+
+ return 0
+
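+# ACL-check every batch of a group request, expand builder patterns and
+# queue the request; any failed check is mailed back to the requester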
+def handle_group(r, user):
+ lockf = None
+ def fail_mail(msg):
+ if len(r.batches) >= 1:
+ spec = r.batches[0].spec
+ else:
+ spec = "None.spec"
+ log.error("%s: %s" % (spec, msg))
+ m = user.message_to()
+ m.set_headers(subject = "building %s failed" % spec)
+ m.write_line(msg)
+ m.send()
+
+ lockf = lock("request")
+ if check_double_id(r.id):
+ lockf.close()
+ return
+
+ for batch in r.batches:
+
+ if not user.can_do("src", config.builder, batch.branch):
+ fail_mail("user %s is not allowed to src:%s:%s" \
+ % (user.get_login(), config.builder, batch.branch))
+ lockf.close()
+ return
+
+ if 'test-build' in r.flags and 'upgrade' in r.flags:
+ fail_mail("it's forbidden to upgrade from a test build")
+ lockf.close()
+ return
+
+ if "upgrade" in r.flags and not user.can_do("upgrade", config.builder, batch.branch):
+ fail_mail("user %s is not allowed to upgrade:%s:%s" \
+ % (user.get_login(), config.builder, batch.branch))
+ lockf.close()
+ return
+
+ # src builder handles only special commands
+ if batch.is_command() and (batch.command in ["git pull"] or batch.command[:5] == "skip:" or config.builder in batch.builders):
+ batch.expand_builders(config.binary_builders + [config.src_builder])
+ else:
+ batch.expand_builders(config.binary_builders)
+
+ if not batch.is_command() and config.builder in batch.builders:
+ batch.builders.remove(config.builder)
+
+ for bld in batch.builders:
+ batch.builders_status[bld] = '?'
+ batch.builders_status_time[bld] = time.time()
+ if bld not in config.binary_builders and bld != config.builder:
+ fail_mail("I (src rpm builder '%s') do not handle binary builder '%s', only '%s'" % \
+ (config.builder, bld, string.join(config.binary_builders)))
+ lockf.close()
+ return
+ if batch.is_command():
+ if "no-chroot" in batch.command_flags:
+ if not user.can_do("command-no-chroot", bld):
+ fail_mail("user %s is not allowed to command-no-chroot:%s" \
+ % (user.get_login(), bld))
+ lockf.close()
+ return
+ if not user.can_do("command", bld):
+ fail_mail("user %s is not allowed to command:%s" \
+ % (user.get_login(), bld))
+ lockf.close()
+ return
+ elif not user.can_do("binary", bld, batch.branch):
+ pkg = batch.spec
+ if pkg.endswith(".spec"):
+ pkg = pkg[:-5]
+ if not user.can_do("binary-" + pkg, bld, batch.branch):
+ fail_mail("user %s is not allowed to binary-%s:%s:%s" \
+ % (user.get_login(), pkg, bld, batch.branch))
+ lockf.close()
+ return
+
+ r.priority = user.check_priority(r.priority,config.builder)
+ r.requester = user.get_login()
+ r.requester_email = user.mail_to()
+ r.time = time.time()
+ log.notice("queued %s from %s" % (r.id, user.get_login()))
+ q = B_Queue(path.queue_file)
+ q.lock(0)
+ q.read()
+ q.add(r)
+ q.write()
+ q.unlock()
+ lockf.close()
+
+def handle_notification(r, user):
+ if not user.can_do("notify", r.builder):
+        log.alert("user %s is not allowed to notify:%s" % (user.login, r.builder))
+        return
+ q = B_Queue(path.req_queue_file)
+ q.lock(0)
+ q.read()
+ not_fin = filter(lambda (r): not r.is_done(), q.requests)
+ r.apply_to(q)
+ for r in not_fin:
+ if r.is_done():
+ util.clean_tmp(path.srpms_dir + '/' + r.id)
+ now = time.time()
+ def leave_it(r):
+ # for ,,done'' set timeout to 4d
+ if r.is_done() and r.time + 4 * 24 * 60 * 60 < now:
+ return False
+ # and for not ,,done'' set it to 20d
+ if r.time + 20 * 24 * 60 * 60 < now:
+ util.clean_tmp(path.srpms_dir + '/' + r.id)
+ return False
+ return True
+ q.requests = filter(leave_it, q.requests)
+ q.write()
+ q.dump(path.queue_stats_file)
+ q.dump_html(path.queue_html_stats_file)
+ q.write_signed(path.req_queue_signed_file)
+ q.unlock()
+
+def handle_request(req, filename = None):
+ if req == '':
+ log.alert('Empty body received. Filename: %s' % filename)
+ return False
+
+ keys = gpg.get_keys(req)
+ (em, body) = gpg.verify_sig(req)
+ if not em:
+ log.alert("Invalid signature, missing/untrusted key. Keys in gpg batch: '%s'" % keys)
+ return False
+ user = acl.user_by_email(em)
+ if user == None:
+ # FIXME: security email here
+ log.alert("'%s' not in acl. Keys in gpg batch: '%s'" % (em, keys))
+ return False
+
+ acl.set_current_user(user)
+ status.push("request from %s" % user.login)
+ r = request.parse_request(body)
+ if r.kind == 'group':
+ handle_group(r, user)
+ elif r.kind == 'notification':
+ handle_notification(r, user)
+ else:
+ msg = "%s: don't know how to handle requests of this kind '%s'" \
+ % (user.get_login(), r.kind)
+ log.alert(msg)
+ m = user.message_to()
+ m.set_headers(subject = "unknown request")
+ m.write_line(msg)
+ m.send()
+ status.pop()
+ return True
+
+def handle_request_main(req, filename = None):
+ acl.try_reload()
+ init_conf("src")
+ status.push("handling email request")
+ ret = handle_request(req, filename = filename)
+ status.pop()
+ return ret
+
+def main():
+ sys.exit(not handle_request_main(sys.stdin.read()))
+
+if __name__ == '__main__':
+ wrap.wrap(main)
--- /dev/null
+#!/bin/sh
+self=$0
+if [ -L "$self" ]; then
+ self=$(readlink -f "$0")
+fi
+sock=$(dirname "$self")/request_handler_server.sock
+daemon=0
+attach=0
+
+if [ x"$1" = x"--daemon" -o x"$1" = x"-d" ]; then
+ daemon=1
+fi
+if [ x"$1" = x"--attach" -o x"$1" = x"-a" ]; then
+ attach=1
+fi
+
+if [ ! -S $sock ]; then
+ daemon=1
+fi
+
+# if no mode was selected, default to attaching
+if [ $daemon = 0 -a $attach = 0 ]; then
+ attach=1
+fi
+
+if [ "$daemon" = 1 ]; then
+ echo "Starting request_handler_server"
+ rm -f $sock
+ cd $(dirname $sock)
+ exec dtach -n $(basename $sock) -r none python request_handler_server.py
+ exit 1
+fi
+
+# attach to session
+if [ "$attach" = 1 ]; then
+ echo "Attaching to request_handler_server"
+ exec dtach -a $sock
+ exit 1
+fi
--- /dev/null
+#!/usr/bin/python
+
+import socket
+import string
+import cgi
+import time
+import log
+import sys
+import traceback
+import os
+from config import config, init_conf
+
+from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+
+import request_handler
+import path
+
+class MyHandler(BaseHTTPRequestHandler):
+
+ def do_GET(self):
+        self.send_error(401)
+
+ def do_POST(self):
+        global rootnode
+        filename = None
+        try:
+ length = int(self.headers.getheader('content-length'))
+ ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
+ if ctype != 'application/x-www-form-urlencoded':
+ log.error("request_handler_server: [%s]: 401 Unauthorized" % self.client_address[0])
+ self.send_error(401)
+ self.end_headers()
+ return
+
+ query = self.rfile.read(length)
+
+ filename = self.headers.getheader('x-filename')
+
+ if not request_handler.handle_request_main(query, filename = filename):
+                error = log.last_log()
+ log.error("request_handler_server: [%s]: handle_request_main(..., %s) failed" % (self.client_address[0], filename))
+ self.send_error(500, "%s: request failed. %s" % (filename, error))
+ self.end_headers()
+ return
+
+ self.send_response(200)
+ self.end_headers()
+
+ except Exception, e:
+ self.send_error(500, "%s: %s" % (filename, e))
+ self.end_headers()
+ log.error("request_handler_server: [%s]: exception: %s\n%s" % (self.client_address[0], e, traceback.format_exc()))
+ raise
+ pass
+
+def write_css():
+ css_file = path.www_dir + "/style.css"
+ # skip if file exists and code is not newer
+ if os.path.exists(css_file) and os.stat(__file__).st_mtime < os.stat(css_file).st_mtime:
+ return
+
+    # css from www.pld-linux.org wiki theme, cleaned up with the css usage firebug plugin
+ css = """
+html {
+ background-color: white;
+ color: #5e5e5e;
+ font-family: Tahoma, Arial, Lucida Grande, sans-serif;
+ font-size: 0.75em;
+ line-height: 1.25em;
+}
+
+a {
+ text-decoration: underline;
+ color: #006;
+}
+
+a:hover {
+ color: #006;
+}
+
+pre {
+ background: #FFF8EB;
+ border: 1pt solid #FFE2AB;
+ font-family: courier, monospace;
+ padding: 0.5em;
+ white-space: pre-wrap;
+ word-wrap: break-word;
+}
+
+@media screen, projection {
+ html {
+ background-color: #f3efe3;
+ }
+
+ body {
+ position: relative;
+ }
+
+ div {
+ background-color: white;
+ margin: 10px 0px;
+ padding: 2px;
+ }
+ div > a {
+ font-weight: bold;
+ color: #5e5e5e;
+ }
+ div > a:hover {
+ color: #5e5e5e;
+ }
+ div.upgrade {
+ background-color: #e4f1cf;
+ }
+ div:target {
+ background-color: #ffffcc;
+ color: black;
+ }
+}
+@media print {
+ a {
+ background-color: inherit;
+ color: inherit;
+ }
+}
+
+@media projection {
+ html { line-height: 1.8em; }
+ body, b, a, p { font-size: 22pt; }
+}
+"""
+ old_umask = os.umask(0022)
+ f = open(css_file, "w")
+ f.write(css)
+ f.close()
+ os.umask(old_umask)
+
+def write_js():
+ js_file = path.www_dir + "/script.js"
+ # skip if file exists and code is not newer
+ if os.path.exists(js_file) and os.stat(__file__).st_mtime < os.stat(js_file).st_mtime:
+ return
+
+ js = """
+// update date stamps to reflect the viewer's timezone
+function update_tz(t) {
+ var el, off, dt,
+ collection = document.getElementsByTagName('span');
+ for (off in collection) {
+ el = collection[off];
+ if (el.id == 'tz') {
+ dt = new Date(el.innerHTML).toString();
+ // strip timezone name, it is usually wrong when not initialized
+ // from TZ env, but reverse calculated from os data
+ dt = dt.replace(/\s+\(.+\)/, "");
+ // strip "GMT"
+ dt = dt.replace(/GMT/, "");
+ el.innerHTML = dt;
+ }
+ }
+}
+window.onload = update_tz;
+"""
+ old_umask = os.umask(0022)
+ f = open(js_file, "w")
+ f.write(js)
+ f.close()
+ os.umask(old_umask)
+
+def main():
+    write_css()
+    write_js()
+ socket.setdefaulttimeout(30)
+ try:
+ init_conf()
+ host = ""
+ port = config.request_handler_server_port
+
+ try:
+ server = HTTPServer((host, port), MyHandler)
+ except Exception, e:
+ log.notice("request_handler_server: can't start server on [%s:%d]: %s" % (host, port, e))
+ print >> sys.stderr, "ERROR: Can't start server on [%s:%d]: %s" % (host, port, e)
+ sys.exit(1)
+
+ log.notice('request_handler_server: started on [%s:%d]...' % (host, port))
+ server.serve_forever()
+ except KeyboardInterrupt:
+ log.notice('request_handler_server: ^C received, shutting down server')
+ server.socket.close()
+
+if __name__ == '__main__':
+ main()
+
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import sys
+import os
+import atexit
+import time
+import datetime
+import string
+import urllib
+import urllib2
+
+from config import config, init_conf
+from bqueue import B_Queue
+import lock
+import util
+import loop
+import path
+import status
+import log
+import chroot
+import ftp
+import buildlogs
+import notify
+import build
+import report
+import install
+
+# *HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*
+import socket
+
+socket.myorigsocket=socket.socket
+
+def mysocket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
+ s=socket.myorigsocket(family, type, proto)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+ return s
+
+socket.socket=mysocket
+# *HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*HACK*
+
+# this code is duplicated in srpm_builder, but we
+# might want to handle some cases differently here
+def pick_request(q):
+ def mycmp(r1, r2):
+ if r1.kind != 'group' or r2.kind != 'group':
+ raise Exception, "non-group requests"
+ pri_diff = cmp(r1.priority, r2.priority)
+ if pri_diff == 0:
+ return cmp(r1.time, r2.time)
+ else:
+ return pri_diff
+ q.requests.sort(mycmp)
+ ret = q.requests[0]
+ return ret
+
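+# ask the src builder whether this build should be skipped: True when a
+# "skipme" file exists for the request, False otherwise; connection
+# errors are retried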
+def check_skip_build(r, b):
+ src_url = config.control_url + "/srpms/" + r.id + "/skipme"
+ good = False
+ b.log_line("checking if we should skip the build")
+ while not good:
+ try:
+ headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' }
+ req = urllib2.Request(url=src_url, headers=headers)
+ f = urllib2.urlopen(req)
+ good = True
+ except urllib2.HTTPError, error:
+ return False
+ except urllib2.URLError, error:
+ # see errno.h
+ try:
+ errno = error.errno
+ except AttributeError:
+ # python 2.4
+ errno = error.reason[0]
+
+ if errno in [-3, 60, 61, 110, 111]:
+ b.log_line("unable to connect... trying again")
+ continue
+ else:
+ return False
+ f.close()
+ return True
+ return False
+
+def fetch_src(r, b):
+ src_url = config.control_url + "/srpms/" + r.id + "/" + urllib.quote(b.src_rpm)
+ b.log_line("fetching %s" % src_url)
+ start = time.time()
+ good = False
+ while not good:
+ try:
+ headers = { 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' }
+ req = urllib2.Request(url=src_url, headers=headers)
+ f = urllib2.urlopen(req)
+ good = True
+ except urllib2.HTTPError, error:
+ # fail in a way where cron job will retry
+ msg = "unable to fetch url %s, http code: %d" % (src_url, error.code)
+ b.log_line(msg)
+ queue_time = time.time() - r.time
+ # 6 hours
+ if error.code != 404 or (queue_time >= 0 and queue_time < (6 * 60 * 60)):
+ raise IOError, msg
+ else:
+                msg = "in queue for more than 6 hours, giving up on download"
+ b.log_line(msg)
+ return False
+ except urllib2.URLError, error:
+ # see errno.h
+ try:
+ errno = error.errno
+ except AttributeError:
+ # python 2.4
+ errno = error.reason[0]
+
+ if errno in [-3, 60, 61, 110, 111]:
+ b.log_line("unable to connect to %s... trying again" % (src_url))
+ continue
+ else:
+ raise
+
+ o = chroot.popen("cat > %s" % b.src_rpm, mode = "w")
+
+ try:
+ bytes = util.sendfile(f, o)
+ except IOError, e:
+ b.log_line("error: unable to write to `%s': %s" % (b.src_rpm, e))
+ raise
+
+ f.close()
+ o.close()
+ t = time.time() - start
+ if t == 0:
+ b.log_line("fetched %d bytes" % bytes)
+ else:
+ b.log_line("fetched %d bytes, %.1f K/s" % (bytes, bytes / 1024.0 / t))
+
+def prepare_env():
+ chroot.run("""
+ test ! -f /proc/uptime && mount /proc 2>/dev/null
+ test ! -c /dev/full && rm -f /dev/full && mknod -m 666 /dev/full c 1 7
+ test ! -c /dev/null && rm -f /dev/null && mknod -m 666 /dev/null c 1 3
+ test ! -c /dev/random && rm -f /dev/random && mknod -m 644 /dev/random c 1 8
+ test ! -c /dev/urandom && rm -f /dev/urandom && mknod -m 644 /dev/urandom c 1 9
+ test ! -c /dev/zero && rm -f /dev/zero && mknod -m 666 /dev/zero c 1 5
+
+ # need entry for "/" in mtab, for diskspace() to work in rpm
+ [ -z $(awk '$2 == "/" {print $1}' /etc/mtab) ] && mount -f -t rootfs rootfs /
+
+    # make necessary files readable for builder user
+ # TODO: see if they really aren't readable for builder
+ for db in Packages Name Basenames Providename Pubkeys; do
+ db=/var/lib/rpm/$db
+ test -f $db && chmod a+r $db
+ done
+
+ # try to limit network access for builder account
+ /bin/setfacl -m u:builder:--- /etc/resolv.conf
+ """, 'root')
+
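+# build a single batch in the chroot: fetch and install the src.rpm,
+# check the build arch, install BuildRequires, run rpmbuild and upload
+# the resulting packages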
+def build_rpm(r, b):
+ if len(b.spec) <= 5:
+ # should not really get here
+        b.log_line("error: no .spec given or malformed: '%s'" % b.spec)
+ res = "FAIL_INTERNAL"
+ return res
+
+ packagename = b.spec[:-5]
+ status.push("building %s (%s)" % (b.spec, packagename))
+ b.log_line("request from: %s" % r.requester)
+
+ if check_skip_build(r, b):
+ b.log_line("build skipped due to src builder request")
+ res = "SKIP_REQUESTED"
+ return res
+
+ b.log_line("started at: %s" % time.asctime())
+ fetch_src(r, b)
+ b.log_line("installing srpm: %s" % b.src_rpm)
+ res = chroot.run("""
+ # b.id %(bid)s
+ set -ex;
+ install -d rpm/packages/%(package)s rpm/BUILD/%(package)s;
+ rpm -Uhv %(rpmdefs)s %(src_rpm)s;
+ rm -f %(src_rpm)s;
+ """ % {
+ 'bid' : b.b_id,
+ 'package' : packagename,
+ 'rpmdefs' : b.rpmbuild_opts(),
+ 'src_rpm' : b.src_rpm
+ }, logfile = b.logfile)
+ b.files = []
+
+ # it's better to have TMPDIR and BUILD dir on same partition:
+ # + /usr/bin/bzip2 -dc /home/services/builder/rpm/packages/kernel/patch-2.6.27.61.bz2
+ # patch: **** Can't rename file /tmp/B.a1b1d3/poKWwRlp to drivers/scsi/hosts.c : No such file or directory
+ tmpdir = os.environ.get('HOME') + "/rpm/BUILD/%s/tmp" % packagename
+ if res:
+ b.log_line("error: installing src rpm failed")
+ res = "FAIL_SRPM_INSTALL"
+ else:
+ prepare_env()
+ chroot.run("install -m 700 -d %s" % tmpdir)
+
+ b.default_target(config.arch)
+ # check for build arch before filling BR
+ cmd = "set -ex; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
+ "rpmbuild -bp --short-circuit --nodeps %(rpmdefs)s --define 'prep exit 0' rpm/packages/%(package)s/%(spec)s" % {
+ 'tmpdir': tmpdir,
+ 'nice' : config.nice,
+ 'rpmdefs' : b.rpmbuild_opts(),
+ 'package' : packagename,
+ 'spec': b.spec,
+ }
+ res = chroot.run(cmd, logfile = b.logfile)
+ if res:
+ res = "UNSUPP"
+ b.log_line("error: build arch check (%s) failed" % cmd)
+
+ if not res:
+ if ("no-install-br" not in r.flags) and not install.uninstall_self_conflict(b):
+ res = "FAIL_DEPS_UNINSTALL"
+ if ("no-install-br" not in r.flags) and not install.install_br(r, b):
+ res = "FAIL_DEPS_INSTALL"
+ if not res:
+ max_jobs = max(min(int(os.sysconf('SC_NPROCESSORS_ONLN') + 1), config.max_jobs), 1)
+ if r.max_jobs > 0:
+ max_jobs = max(min(config.max_jobs, r.max_jobs), 1)
+ cmd = "set -ex; : build-id: %(r_id)s; TMPDIR=%(tmpdir)s exec nice -n %(nice)s " \
+ "rpmbuild -bb --define '_smp_mflags -j%(max_jobs)d' %(rpmdefs)s rpm/packages/%(package)s/%(spec)s" % {
+ 'r_id' : r.id,
+ 'tmpdir': tmpdir,
+ 'nice' : config.nice,
+ 'rpmdefs' : b.rpmbuild_opts(),
+ 'package' : packagename,
+ 'max_jobs' : max_jobs,
+ 'spec': b.spec,
+ }
+ b.log_line("building RPM using: %s" % cmd)
+ begin_time = time.time()
+ res = chroot.run(cmd, logfile = b.logfile)
+ end_time = time.time()
+ b.log_line("ended at: %s, done in %s" % (time.asctime(), datetime.timedelta(0, end_time - begin_time)))
+ if res:
+ res = "FAIL"
+ files = util.collect_files(b.logfile)
+ if len(files) > 0:
+ r.chroot_files.extend(files)
+ else:
+ b.log_line("error: No files produced.")
+ last_section = util.find_last_section(b.logfile)
+ if last_section == None:
+ res = "FAIL"
+ else:
+ res = "FAIL_%s" % last_section.upper()
+ b.files = files
+
+ chroot.run("""
+ set -ex;
+ rpmbuild %(rpmdefs)s --nodeps --nobuild --clean --rmspec --rmsource rpm/packages/%(package)s/%(spec)s
+ rm -rf %(tmpdir)s;
+ chmod -R u+rwX rpm/BUILD/%(package)s;
+ rm -rf rpm/BUILD/%(package)s;
+ """ %
+ {'tmpdir' : tmpdir, 'spec': b.spec, 'package' : packagename, 'rpmdefs' : b.rpmbuild_opts()}, logfile = b.logfile)
+
+ def ll(l):
+ util.append_to(b.logfile, l)
+
+ if b.files != []:
+ rpm_cache_dir = config.rpm_cache_dir
+ if "test-build" not in r.flags:
+ # NOTE: copying to cache dir doesn't mean that build failed, so ignore result
+ b.log_line("copy rpm files to cache_dir: %s" % rpm_cache_dir)
+ chroot.run(
+ "cp -f %s %s && poldek --mo=nodiff --mkidxz -s %s/" % \
+ (string.join(b.files), rpm_cache_dir, rpm_cache_dir),
+ logfile = b.logfile, user = "root"
+ )
+ else:
+ ll("test-build: not copying to " + rpm_cache_dir)
+ ll("Begin-PLD-Builder-Info")
+ if "upgrade" in r.flags:
+ b.upgraded = install.upgrade_from_batch(r, b)
+ else:
+ ll("not upgrading")
+ ll("End-PLD-Builder-Info")
+
+ for f in b.files:
+ local = r.tmp_dir + os.path.basename(f)
+ chroot.cp(f, outfile = local, rm = True)
+ ftp.add(local)
+
+ def uploadinfo(b):
+ c="file:SRPMS:%s\n" % b.src_rpm
+ for f in b.files:
+ c=c + "file:ARCH:%s\n" % os.path.basename(f)
+ c=c + "END\n"
+ return c
+
+ if config.gen_upinfo and b.files != [] and 'test-build' not in r.flags:
+ fname = r.tmp_dir + b.src_rpm + ".uploadinfo"
+ f = open(fname, "w")
+ f.write(uploadinfo(b))
+ f.close()
+ ftp.add(fname, "uploadinfo")
+
+ status.pop()
+
+ return res
+
+def handle_request(r):
+ ftp.init(r)
+ buildlogs.init(r)
+ build.build_all(r, build_rpm)
+ report.send_report(r, is_src = False)
+ ftp.flush()
+ notify.send(r)
+
+def check_load():
+ do_exit = 0
+ try:
+ f = open("/proc/loadavg")
+ if float(string.split(f.readline())[2]) > config.max_load:
+ do_exit = 1
+ except:
+ pass
+ if do_exit:
+ sys.exit(0)
+
+def main_for(builder):
+ msg = ""
+
+ init_conf(builder)
+
+ q = B_Queue(path.queue_file + "-" + config.builder)
+ q.lock(0)
+ q.read()
+ if q.requests == []:
+ q.unlock()
+ return
+ req = pick_request(q)
+ q.unlock()
+
+ # high priority tasks have priority < 0, normal tasks >= 0
+ if req.priority >= 0:
+
+ # allow only one build in given builder at once
+ if not lock.lock("building-rpm-for-%s" % config.builder, non_block = 1):
+ return
+ # don't kill server
+ check_load()
+        # no more than job_slots builds at once
+ locked = 0
+ for slot in range(config.job_slots):
+ if lock.lock("building-rpm-slot-%d" % slot, non_block = 1):
+ locked = 1
+ break
+ if not locked:
+ return
+
+ # record fact that we got lock for this builder, load balancer
+ # will use it for fair-queuing
+ l = lock.lock("got-lock")
+ f = open(path.got_lock_file, "a")
+ f.write(config.builder + "\n")
+ f.close()
+ l.close()
+ else:
+ msg = "HIGH PRIORITY: "
+
+ msg += "handling request %s (%d) for %s from %s, priority %s" \
+ % (req.id, req.no, config.builder, req.requester, req.priority)
+ log.notice(msg)
+ status.push(msg)
+ handle_request(req)
+ status.pop()
+
+ def otherreqs(r):
+ if r.no==req.no:
+ return False
+ else:
+ return True
+
+ q = B_Queue(path.queue_file + "-" + config.builder)
+ q.lock(0)
+ q.read()
+ previouslen=len(q.requests)
+ q.requests=filter(otherreqs, q.requests)
+ if len(q.requests)<previouslen:
+ q.write()
+ q.unlock()
+
+def main():
+ if len(sys.argv) < 2:
+ raise Exception, "fatal: need to have builder name as first arg"
+ return main_for(sys.argv[1])
+
+if __name__ == '__main__':
+ loop.run_loop(main)
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import email
+import string
+import time
+import os
+import StringIO
+import sys
+import re
+import shutil
+import atexit
+import tempfile
+
+import gpg
+import request
+import log
+import path
+import util
+import loop
+import chroot
+import ftp
+import buildlogs
+import notify
+import status
+import build
+import report
+
+from lock import lock
+from bqueue import B_Queue
+from config import config, init_conf
+
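+# order the queue by priority (lower value wins), then by submission
+# time, and pop the winning request off the queue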
+def pick_request(q):
+ def mycmp(r1, r2):
+ if r1.kind != 'group' or r2.kind != 'group':
+ raise Exception, "non-group requests"
+ pri_diff = cmp(r1.priority, r2.priority)
+ if pri_diff == 0:
+ return cmp(r1.time, r2.time)
+ else:
+ return pri_diff
+ q.requests.sort(mycmp)
+ ret = q.requests[0]
+ q.requests = q.requests[1:]
+ return ret
+
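+# drop failed batches, publish the group for the binary builders and
+# atomically bump max_req_no (tempfile + rename)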
+def store_binary_request(r):
+ new_b = []
+ for b in r.batches:
+ if not b.build_failed: new_b.append(b)
+ if new_b == []:
+ return
+ r.batches = new_b
+ # store new queue and max_req_no for binary builders
+ num = int(string.strip(open(path.max_req_no_file, "r").read())) + 1
+
+ r.no = num
+ q = B_Queue(path.req_queue_file)
+ q.lock(0)
+ q.read()
+ q.add(r)
+ q.write()
+ q.dump(path.queue_stats_file)
+ q.dump_html(path.queue_html_stats_file)
+ q.write_signed(path.req_queue_signed_file)
+ q.unlock()
+
+ (fdno, tmpfname) = tempfile.mkstemp(dir=os.path.dirname(path.max_req_no_file))
+ cnt_f = os.fdopen(fdno, "w")
+ cnt_f.seek(0)
+ cnt_f.write("%d\n" % num)
+ cnt_f.flush()
+ os.fsync(cnt_f.fileno())
+ cnt_f.close()
+ os.chmod(tmpfname, 0644)
+ os.rename(tmpfname, path.max_req_no_file)
+
+def transfer_file(r, b):
+ local = path.srpms_dir + '/' + r.id + "/" + b.src_rpm
+ f = b.src_rpm_file
+ # export files from chroot
+ chroot.cp(f, outfile = local, rm = True)
+ os.chmod(local, 0644)
+ ftp.add(local)
+
+ if config.gen_upinfo and 'test-build' not in r.flags:
+ fname = path.srpms_dir + '/' + r.id + "/" + b.src_rpm + ".uploadinfo"
+ f = open(fname, "w")
+ f.write("info:build:%s:requester:%s\ninfo:build:%s:requester_email:%s\nfile:SRPMS:%s\nEND\n" % (b.gb_id, b.requester, b.gb_id, b.requester_email, b.src_rpm))
+ f.close()
+ ftp.add(fname, "uploadinfo")
+
+def build_srpm(r, b):
+ if len(b.spec) == 0:
+ # should not really get here
+ util.append_to(b.logfile, "error: No .spec given but build src.rpm wanted")
+ return "FAIL"
+
+ status.push("building %s" % b.spec)
+
+ b.src_rpm = ""
+ builder_opts = "-nu -nm --nodeps --http"
+    if ("test-build" in r.flags) or (b.branch and b.branch.startswith(config.tag_prefixes[0])):
+ tag_test=""
+ else:
+ tag_test=" -Tp %s -tt" % (config.tag_prefixes[0],)
+ cmd = ("cd rpm/packages; nice -n %s ./builder %s -bs %s -r %s %s %s %s 2>&1" %
+ (config.nice, builder_opts, b.bconds_string(), b.branch,
+ tag_test, b.kernel_string(), b.spec))
+ util.append_to(b.logfile, "request from: %s" % r.requester)
+ util.append_to(b.logfile, "started at: %s" % time.asctime())
+ util.append_to(b.logfile, "building SRPM using: %s\n" % cmd)
+ res = chroot.run(cmd, logfile = b.logfile)
+ util.append_to(b.logfile, "exit status %d" % res)
+ files = util.collect_files(b.logfile)
+ if len(files) > 0:
+ if len(files) > 1:
+ util.append_to(b.logfile, "error: More than one file produced: %s" % files)
+ res = "FAIL_TOOMANYFILES"
+ last = files[len(files) - 1]
+ b.src_rpm_file = last
+ b.src_rpm = os.path.basename(last)
+ r.chroot_files.extend(files)
+ else:
+ util.append_to(b.logfile, "error: No files produced.")
+ res = "FAIL"
+ if res == 0 and not "test-build" in r.flags:
+ for pref in config.tag_prefixes:
+ util.append_to(b.logfile, "Tagging with prefix: %s" % pref)
+ res = chroot.run("cd rpm/packages; ./builder -r %s -Tp %s -Tv %s" % \
+ (b.branch, pref, b.spec), logfile = b.logfile)
+ if res == 0:
+ transfer_file(r, b)
+
+ packagename = b.spec[:-5]
+ packagedir = "rpm/packages/%s" % packagename
+ chroot.run("rpm/packages/builder -m %s" % \
+ (b.spec,), logfile = b.logfile)
+ chroot.run("rm -rf %s" % packagedir, logfile = b.logfile)
+ status.pop()
+
+ if res:
+ res = "FAIL"
+ return res
+
+def handle_request(r):
+ os.mkdir(path.srpms_dir + '/' + r.id)
+ os.chmod(path.srpms_dir + '/' + r.id, 0755)
+ ftp.init(r)
+ buildlogs.init(r)
+ build.build_all(r, build_srpm)
+ report.send_report(r, is_src = True)
+ report.send_cia_report(r, is_src = True)
+ store_binary_request(r)
+ ftp.flush()
+ notify.send(r)
+
+def main():
+ init_conf("src")
+ if lock("building-srpm", non_block = 1) == None:
+ return
+ while True:
+ status.push("srpm: processing queue")
+ q = B_Queue(path.queue_file)
+ if not q.lock(1):
+ status.pop()
+ return
+ q.read()
+ if q.requests == []:
+ q.unlock()
+ status.pop()
+ return
+ r = pick_request(q)
+ q.write()
+ q.unlock()
+ status.pop()
+ status.push("srpm: handling request from %s" % r.requester)
+ handle_request(r)
+ status.pop()
+
+if __name__ == '__main__':
+ loop.run_loop(main)
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
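+# stack of human-readable "what am I doing now" strings; wrap.py and
+# the error mails report the current stack on failure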
+state = []
+email = ""
+admin = ""
+builder_list = ""
+
+def push(s):
+ state.append(s)
+
+def pop():
+ state.pop()
+
+def get():
+ return "%s" % state
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import time
+import resource
+
+class Time:
+ def __init__(self):
+ x = resource.getrusage(resource.RUSAGE_CHILDREN)
+ self.user_time = x[0]
+ self.sys_time = x[1]
+ self.non_io_faults = x[6]
+ self.io_faults = x[7]
+ self.time = time.time()
+
+ def sub(self, x):
+ self.user_time -= x.user_time
+ self.sys_time -= x.sys_time
+ self.non_io_faults -= x.non_io_faults
+ self.io_faults -= x.io_faults
+ self.time -= x.time
+
+ def format(self):
+ return "user:%.2fs sys:%.2fs real:%.2fs (faults io:%d non-io:%d)" % \
+ (self.user_time, self.sys_time, self.time, self.io_faults,
+ self.non_io_faults)
+
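+# stopwatch measuring wall clock time and children's resource usage;
+# start/stop pairs may be nested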
+class Timer:
+ def __init__(self):
+ self.starts = []
+
+ def start(self):
+ self.starts.append(Time())
+
+ def stop(self):
+ tmp = Time()
+ tmp.sub(self.starts.pop())
+ return tmp.format()
+
+t = Timer()
+
+def start():
+ t.start()
+
+def stop():
+ return t.stop()
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import re
+import sys
+import os
+import log
+import string
+
+def uuid_python():
+ return str(uuid_random())
+
+def uuid_external():
+ f = os.popen("uuidgen 2>&1")
+ u = string.strip(f.read())
+ f.close()
+ if len(u) != 36:
+ raise Exception, "uuid: fatal, cannot generate uuid: %s" % u
+ return u
+
+# uuid module available in python >= 2.5
+try:
+ from uuid import uuid4 as uuid_random
+except ImportError:
+ uuid = uuid_external
+else:
+ uuid = uuid_python
+
+def pkg_name(nvr):
+ return re.match(r"(.+)-[^-]+-[^-]+", nvr).group(1)
+
+def msg(m):
+ sys.stderr.write(m)
+
+def sendfile(src, dst):
+ cnt = 0
+ while 1:
+ s = src.read(10000)
+ if s == "": break
+ cnt += len(s)
+ dst.write(s)
+ return cnt
+
+def append_to(log, msg):
+ f = open(log, "a")
+ f.write("%s\n" % msg)
+ f.close()
+
+def clean_tmp(dir):
+ # FIXME: use python
+ os.system("rm -f %s/* 2>/dev/null; rmdir %s 2>/dev/null" % (dir, dir))
+
+def collect_files(log):
+ f = open(log, 'r')
+ rx = re.compile(r"^Wrote: (/home.*\.rpm)$")
+ files = []
+ for l in f.xreadlines():
+ m = rx.search(l)
+ if m:
+ files.append(m.group(1))
+ f.close()
+ return files
+
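+# scan a buildlog for rpmbuild section markers (%prep, %build, ...) and
+# return the name of the last one started, or None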
+def find_last_section(log):
+ f = open(log, 'r')
+ rx1 = re.compile(r"^Executing\(%(\w+)\).*$")
+ rx2 = re.compile(r"^Processing (files):.*$")
+ last_section = None
+ for l in f:
+ m = rx1.search(l)
+ if not m:
+ m = rx2.search(l)
+ if m:
+ last_section = m.group(1)
+ f.close()
+ return last_section
--- /dev/null
+# vi: encoding=utf-8 ts=8 sts=4 sw=4 et
+
+import sys
+import log
+import traceback
+import StringIO
+import os
+import time
+
+# this module deals with internal error handling, so it shouldn't
+# import anything besides status
+import status
+
+try:
+ import mailer
+ def sendmail(trace):
+ m = mailer.Message()
+ m.set_headers(to = status.admin, cc = "%s, %s" % (status.email, status.builder_list), subject = "fatal python exception")
+ m.write("%s\n" % trace)
+ m.write("during: %s\n" % status.get())
+ m.send()
+except:
+ def sendmail(trace):
+        # don't use mailer.py; it's safer this way
+ f = os.popen("/usr/sbin/sendmail -i -t", "w")
+ f.write("""Subject: builder failure
+To: %s
+Cc: %s, %s
+Date: %s
+X-PLD-Builder: fatal error report
+
+%s
+
+during: %s
+""" % (status.admin, status.email, status.builder_list,
+ time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime()),
+       trace, status.get()))
+ f.close()
+
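+# run main(), turning any unhandled exception into log alerts and a
+# mail to the admin; SystemExit is passed through untouched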
+def wrap(main):
+ try:
+ main()
+ except:
+ exctype, value = sys.exc_info()[:2]
+ if exctype == SystemExit:
+ sys.exit(value)
+ s = StringIO.StringIO()
+ traceback.print_exc(file = s, limit = 20)
+
+ log.alert("fatal python exception")
+ log.alert(s.getvalue())
+ log.alert("during: %s" % status.get())
+
+ sendmail(s.getvalue())
+
+ sys.exit(1)
--- /dev/null
+tmp-chroot
--- /dev/null
+#!/bin/sh
+
+umask 077
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+if [ -d "$BUILDERPATH" ]; then
+ cd "$BUILDERPATH"
+else
+ echo "the $BUILDERPATH directory does not exist"
+ exit 1
+fi
+
+
+if [ -f "$BUILDERPATH/config/global" ]; then
+ . $BUILDERPATH/config/global
+fi
+
+if [ "$1" != "y" ] ; then
+	echo "this script kills the current queue and installs a new one"
+ echo "run '$0 y' to run it"
+ exit 1
+fi
+
+mkdir -p spool/{builds,buildlogs,notify,ftp} www/srpms lock
+echo 0 > www/max_req_no
+echo 0 > spool/last_req_no
+echo -n > spool/processed_ids
+echo -n > spool/got_lock
+echo '<queue/>' > spool/queue
+echo '<queue/>' > spool/req_queue
+test ! -z "$binary_builders" && for bb in $binary_builders; do
+ echo '<queue/>' > spool/queue-$bb
+done
+
+chmod 755 www www/srpms
+chmod 644 www/max_req_no
--- /dev/null
+#!/bin/sh
+
+DIST="th"
+DISTTAG="PLD 3.0 (Th)"
+
+die () {
+ echo "$0: $*" 1>&2
+ cat 1>&2 <<EOF
+USAGE: $0 name1=val2 name2=val2 ...
+
+Variables:
+ chroot_type=src or chroot_type=bin (required)
+ chroot_dir=/path/to/chroot (required)
+ arch=i386 (required)
+ git_server=git://<host>/<project> (required in src builder)
+ git_user=<name> (required in src builder)
+ builder_uid=2000 (optional, uid of builder user
+ in chroot; defaults to current uid)
+EOF
+ exit 1
+}
+
+default_config () {
+ builder_pkgs="rpm-build poldek pwdutils net-tools which rpm-perlprov rpm-php-pearprov rpm-pythonprov bash vim"
+ builder_uid=`id -u`
+ dist_url="ftp://ftp.$DIST.pld-linux.org"
+
+ case "$chroot_type" in
+ src )
+ builder_arch_pkgs="wget gawk git-core"
+ ;;
+ bin )
+ builder_arch_pkgs="mount"
+ ;;
+ esac
+}
+
+check_conf () {
+ test "$chroot_dir" || die "no chroot_dir"
+ test "$arch" || die "no arch"
+ test "$dist_url" || die "no dist_url"
+
+ case "$chroot_type" in
+ src )
+ test "$git_server" || die "no git_server"
+ test "$git_user" || die "no git_user"
+ ;;
+ bin )
+ ;;
+ * )
+ die "evil chroot_type: $chroot_type"
+ ;;
+ esac
+}
+
+poldek_src () {
+ if test "$1" ; then
+ cat <<EOF
+[source]
+name=local
+type=pndir
+path=/spools/ready
+pri=1
+EOF
+ fi
+ cat <<EOF
+[source]
+name=main
+type=pndir
+path=$dist_url/dists/$DIST/PLD/$arch/RPMS/
+pri=6
+
+[source]
+name=main
+type=pndir
+path=$dist_url/dists/$DIST/PLD/noarch/RPMS/
+pri=6
+
+EOF
+}
+
+common_poldek_opt () {
+ cat <<EOF
+[global]
+particle_install = no
+greedy = yes
+rpmdef = _excludedocs 1
+EOF
+}
+
+chr() {
+ sudo chroot $chroot_dir su - root -c "$*"
+}
+
+chb() {
+ sudo chroot $chroot_dir su - builder -c "$*"
+}
+
+install_SPECS_builder () {
+ chr "mknod /dev/random -m 644 c 1 8"
+ chr "mknod /dev/urandom -m 644 c 1 9"
+ cat >install-specs <<EOF
+set -x
+rm -rf rpm
+mkdir rpm
+cd rpm
+git clone $git_server/rpm-build-tools rpm-build-tools
+./rpm-build-tools/builder.sh --init-rpm-dir
+echo "%packager PLD bug tracking system ( http://bugs.pld-linux.org/ )">~/.rpmmacros
+echo "%vendor PLD">>~/.rpmmacros
+echo "%distribution $DISTTAG">>~/.rpmmacros
+git config --global user.name $git_user
+git config --global user.email ${git_user}@pld-linux.org
+EOF
+ chb "sh" < install-specs
+ rm install-specs
+ echo "WARNING: Do not forget to install ssh keys to access git repo"
+}
+
+install_build_tree () {
+ cat >install-bt <<EOF
+set -x
+rm -rf rpm
+mkdir rpm
+cd rpm
+mkdir SPECS SOURCES SRPMS RPMS BUILD
+echo "%packager PLD bug tracking system ( http://bugs.pld-linux.org/ )">~/.rpmmacros
+echo "%vendor PLD">>~/.rpmmacros
+echo "%distribution $DISTTAG">>~/.rpmmacros
+EOF
+ chb "sh" < install-bt
+ rm install-bt
+}
+
+
+
+
+eval "$*" || die "invalid arguments"
+default_config
+eval "$*"
+check_conf
+
+rm -rf tmp-chroot
+mkdir tmp-chroot
+cd tmp-chroot
+
+cat >poldek.conf <<EOF
+$(poldek_src)
+$(common_poldek_opt)
+cachedir = $chroot_dir/spools/poldek
+keep_downloads = no
+EOF
+
+cat > install-$chroot_name.sh <<EOF
+#!/bin/sh
+set -x
+cd $PWD
+rm -rf $chroot_dir
+mkdir -p $chroot_dir/spools/poldek
+mkdir $chroot_dir/dev
+mknod $chroot_dir/dev/null -m 666 c 1 3
+rpm --root $chroot_dir --initdb
+poldek --conf poldek.conf --root $chroot_dir --ask -i\
+ $builder_pkgs $builder_arch_pkgs
+EOF
+chmod 755 install-$chroot_name.sh
+
+echo "About to remove '$chroot_dir' and install it again, using"
+echo "install-$chroot_name.sh:"
+echo
+cat install-$chroot_name.sh
+echo
+cat <<EOF
+what to do?
+  r) the script was already run; continue,
+  s) run it using sudo,
+  a) abort
+EOF
+echo -n "[r/s/a]: "
+read ans
+case "$ans" in
+ r )
+ ;;
+ s )
+ sudo ./install-$chroot_name.sh
+ ;;
+ * )
+ echo "bye"
+ exit 1
+esac
+
+chr "ldconfig"
+
+echo "OK"
+echo "installing conf..."
+cat >poldek.conf <<EOF
+$(poldek_src local)
+$(common_poldek_opt)
+cachedir = /spools/poldek
+keep_downloads = no
+EOF
+
+chr "useradd -u "$builder_uid" -c 'PLD $chroot_name builder' -d /home/users/builder -m -g users -s /bin/sh builder"
+chr "cat > /etc/resolv.conf" < /etc/resolv.conf
+chr "cat > /etc/mtab" < /dev/null
+chr "mkdir -p /spools/ready/" < /dev/null
+chr "mkdir -p /spools/poldek/" < /dev/null
+chr "sed -e 's,^\(root:.*\)/bin/sh$,\1/bin/bash,' -i~ /etc/passwd"
+
+
+case $chroot_type in
+ src )
+ install_SPECS_builder
+ ;;
+ bin )
+ install_build_tree
+ ;;
+esac
--- /dev/null
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/file_sender.py
--- /dev/null
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/load_balancer.py
--- /dev/null
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/maintainer.py
--- /dev/null
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/request_fetcher.py
--- /dev/null
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+if lockfile -r3 $HOME/.builder_request_handler.lock 2>/dev/null; then
+ trap "rm -f $HOME/.builder_request_handler.lock" 1 2 3 13 15
+ cd $BUILDERPATH
+ python PLD_Builder/request_handler.py
+ rm -f $HOME/.builder_request_handler.lock
+else
+	exit 1
+fi
--- /dev/null
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/rpm_builder.py $1
--- /dev/null
+#!/bin/sh
+
+umask 022
+export LC_CTYPE=en_US.iso-8859-1
+CONFIG=$HOME/.pldbuilderrc
+[ -f "$CONFIG" ] && . $CONFIG
+[ -n "$BUILDERPATH" ] || BUILDERPATH="$HOME/pld-builder.new/"
+export BUILDERPATH
+
+cd $BUILDERPATH
+exec python PLD_Builder/srpm_builder.py
--- /dev/null
+#!/bin/sh
+# generate rpm -qa without locking database
+
+set -e
+umask 077
+dir=`mktemp -d /tmp/db-XXXXXX`
+cp /var/lib/rpm/Packages $dir
+cp /var/lib/rpm/Name $dir
+rpm --dbpath $dir -qa | sort
+rm -rf $dir
+
--- /dev/null
+#!/bin/sh
+# Authors:
+# - Bartosz ÅšwiÄ…tek (shadzik@pld-linux.org)
+# - Elan Ruusamäe (glen@pld-linux.org)
+#
+# helps sending kde4 specs in proper order with or without autotags
+
+usage() {
+ echo "Usage: $0 OPTIONS SPECS"
+ echo ""
+ echo "Where OPTIONS are:"
+ echo ""
+ echo " -d --distro VALUE"
+ echo " set distro, probably th or ti will fit the most"
+ echo " -at --with-auto-tag"
+ echo " send with current autotag, default no"
+ echo " -b --builder VALUE"
+ echo " choose a particular builder, default all"
+ echo " -p --priority VALUE (default: 2)"
+ echo " -h --help"
+ echo " show this help"
+ echo ""
+ echo "Choose SPECS out of:"
+ echo ""
+ echo "all - all kde4-* (libs, base, other, koffice, l10n)"
+ echo "libs - kde4-kdelibs and kde4-kdepimlibs"
+ echo "base - kde4-kdebase* kde4-oxygen-icons"
+ echo "other - all other kde4-* except libs and base"
+ echo "koffice - kde4-koffice"
+ echo "l10n - kde4-l10n"
+ echo "kdevelop - kde4-devplatform, kde4-kdevelop-*"
+ echo "almost-all - all but koffice and l10n"
+ echo ""
+ exit 0
+}
+
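+# die is used by the option parsing below but was not defined anywhere
+# in this script; minimal definition (same shape as in make-request.sh)
+die() {
+    echo >&2 "$0: $*"
+    exit 1
+}
+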
+DIST=
+ATAG=no
+SENDPRIO=
+BUILDER=
+PRIO=2
+#SPECDIR=$(rpm -E %_specdir)
+SPECDIR=~/rpm
+
+LIBS="kde4-kdelibs.spec kde4-kdepimlibs.spec"
+BASE="kde4-oxygen-icons.spec kde4-kdebase-runtime.spec kde4-kdebase-workspace.spec kde4-kwebkitpart.spec kde4-kdebase.spec"
+OTHER="kde4-kdemultimedia.spec kde4-kdegraphics.spec \
+kde4-kdenetwork.spec \
+kde4-kdepim.spec \
+kde4-kdepim-runtime.spec \
+kde4-kdeartwork.spec \
+kde4-kdewebdev.spec \
+kde4-kdeutils.spec \
+kde4-kdeaccessibility.spec \
+kde4-kdebindings.spec \
+kde4-kdegames.spec \
+kde4-kdeedu.spec \
+kde4-kdeplasma-addons.spec \
+kde4-kdesdk.spec \
+kde4-kdeadmin.spec \
+kde4-kdetoys.spec"
+KOFFICE="kde4-koffice.spec kde4-koffice-l10n.spec"
+L10N="kde4-l10n.spec"
+KDEVELOP="kde4-kdevplatform.spec \
+kde4-kdevelop.spec
+kde4-kdevelop-plugin-php.spec"
+
+while [ $# -gt 0 ]; do
+ case "$1" in
+ --distro | -d )
+ DIST=$2
+ shift
+ ;;
+
+ --with-auto-tag | -at )
+ ATAG=yes
+ ;;
+
+ --builder | -b )
+ BUILDER="$BUILDER $2"
+ shift
+ ;;
+
+ --priority | -p )
+ PRIO=$2
+ shift
+ ;;
+
+ --help | -h )
+ usage
+ ;;
+
+ -* )
+            die "Unknown option: $1"
+ ;;
+
+ *:* | * )
+ specs="$specs $1"
+ ;;
+ esac
+ shift
+done
+
+specs=`for s in $specs; do
+ case "$s" in
+ all) # all kde4 specs
+ echo $LIBS $BASE $OTHER $KOFFICE $L10N
+ ;;
+ libs) # kde4 libs, libs-experimental and pimlibs
+ echo $LIBS
+ ;;
+ base) # kde4-kdebase-*
+ echo $BASE
+ ;;
+ other) # kde4-*
+ echo $OTHER
+ ;;
+ koffice) # kde4-koffice
+ echo $KOFFICE
+ ;;
+ l10n) # kde4-l10n
+ echo $L10N
+ ;;
+ kdevelop) # kde4-kdevplatform and kde4-kdevelop-*
+ echo $KDEVELOP
+ ;;
+ almost-all) # all but koffice and l10n
+ echo $LIBS $BASE $OTHER
+ ;;
+ *) # not listed ones
+ echo $s
+ ;;
+ esac
+done`
+
+if [ "$DIST" == "ti-dev" ]; then
+ disttag="ti"
+else
+ disttag=$DIST
+fi
+
+if [ "$ATAG" == "yes" ]; then
+ for spec in $specs; do
+ PKG=$(echo $spec |sed -e 's/.spec//g')
+ LAST_AUTOTAG=$(cd $SPECDIR/packages && ./builder -g -ns $PKG/$spec >/dev/null 2>&1 && cvs status -v $PKG/$spec | awk -vdist=$disttag '!/Sticky/ && $1 ~ "^auto-" dist "-"{if (!a++) print $1}')
+ sleep 1
+ SENDPRIO="$SENDPRIO $spec:$LAST_AUTOTAG "
+ done
+else
+ SENDPRIO=$specs
+fi
+
+dir=$(dirname "$0")
+exec $dir/make-request.sh ${DIST:+-d $DIST} ${BUILDER:+-b "$BUILDER"} -p $PRIO -r $SENDPRIO
+echo >&2 "Failed to execute ./make-request.sh!"
+exit 1
--- /dev/null
+#!/bin/sh
+
+# prevent "*" from being expanded in builders var
+set -f
+
+builders=
+with=
+without=
+flags=
+command=
+command_flags=
+gpg_opts=
+default_branch='HEAD'
+dist=
+url=
+no_depend=no
+verbose=no
+autotag=no
+
+if [ -x /usr/bin/python ]; then
+ send_mode="python"
+else
+ echo "No python present, using mail mode"
+ send_mode="mail"
+fi
+
+if [ -n "$HOME_ETC" ]; then
+ USER_CFG=$HOME_ETC/.requestrc
+else
+ USER_CFG=$HOME/.requestrc
+fi
+
+if [ ! -f "$USER_CFG" ]; then
+ echo "Creating config file $USER_CFG. You *must* edit it."
+ cat > $USER_CFG <<EOF
+priority=2
+requester=deviloper@pld-linux.org
+default_key=deviloper@pld-linux.org
+send_mode="$send_mode"
+url="$url"
+mailer="/usr/sbin/sendmail -t"
+gpg_opts=""
+dist=th
+url="http://src.th.pld-linux.org:1234/"
+
+# defaults:
+f_upgrade=yes
+EOF
+exit
+fi
+
+if [ -f "$USER_CFG" ]; then
+ . $USER_CFG
+ # legacy fallback
+ if [ "${distro:+set}" = "set" ]; then
+ dist=$distro
+ fi
+fi
+
+# internal options, not to be overridden
+specs=
+df_fetch=no
+upgrade_macros=no
+
+# Set colors
+c_star=$(tput setaf 2)
+c_red=$(tput setaf 1)
+c_norm=$(tput op)
+msg() {
+ echo >&2 "${c_star}*${c_norm} $*"
+}
+red() {
+ echo "${c_red}$*${c_norm}"
+}
+
+die() {
+ echo >&2 "$0: $*"
+ exit 1
+}
+
+send_request() {
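+	# reads the signed request on stdin and either pipes it to $mailer or
+	# POSTs it to $url (via the embedded python helper below)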
+ # switch to mail mode, if no url set
+ [ -z "$url" ] && send_mode="mail"
+
+ case "$send_mode" in
+ "mail")
+ msg "Sending using mail mode"
+ cat - | $mailer
+ ;;
+ *)
+ msg "Sending using http mode to $url"
+ cat - | python -c '
+import sys, socket, urllib2
+
+try:
+ data = sys.stdin.read()
+ url = sys.argv[1]
+ socket.setdefaulttimeout(10)
+ req = urllib2.Request(url, data)
+ f = urllib2.urlopen(req)
+ f.close()
+except Exception, e:
+ print >> sys.stderr, "Problem while sending request via HTTP: %s: %s" % (url, e)
+ sys.exit(1)
+print >> sys.stdout, "Request queued via HTTP."
+' "$url"
+ ;;
+ esac
+}
+
+# simple df_fetcher, based on packages/fetchsrc_request
+# TODO: tcp (smtp) mode
+# TODO: adjust for ~/.requestrc config
+df_fetch() {
+ local specs="$@"
+
+ # Sending by
+ local MAILER='/usr/sbin/sendmail'
+ # MAILER='/usr/bin/msmtp'
+ # Sending via
+ local VIA="SENDMAIL"
+ #VIA="localhost"
+ local VIA_ARGS=""
+ #VIA_ARGS="some additional flags"
+ # e.g. for msmtp:
+ # VIA_ARGS='-a gmail'
+ #
+ # DISTFILES EMAIL
+ local DMAIL="distfiles@pld-linux.org"
+
+ local HOST=$(hostname -f)
+ local LOGIN=${requester%@*}
+
+ for spec in $specs; do
+ local SPEC=$(echo "$spec" | sed -e 's|:.*||')
+ local BRANCH=$(echo "$spec" | sed -e 's|.*:||')
+ echo >&2 "Distfiles Request: $SPEC:$BRANCH via $MAILER ${VIA_ARGS:+ ($VIA_ARGS)}"
+ cat <<-EOF | "$MAILER" -t -i $VIA_ARGS
+ To: $DMAIL
+ From: $LOGIN <$LOGIN@$HOST>
+ Subject: fetchsrc_request notify
+ X-CVS-Module: SPECS
+ X-distfiles-request: yes
+ X-Login: $LOGIN
+ X-Spec: $SPEC
+ X-Branch: $BRANCH
+ X-Flags: force-reply
+
+ .
+ EOF
+ done
+}
+
+# autotag from rpm-build-macros
+# displays latest used tag for a specfile
+autotag() {
+ local out s
+ for s in "$@"; do
+ # strip branches
+ s=${s%:*}
+ # ensure package ends with .spec
+ s=${s%.spec}.spec
+ out=$(cvs status -v $s | awk "!/Sticky/&&/auto-$dist-/{if (!a++) print \$1}")
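+		# prints "<spec>:<tag>"; the tag part is empty when no auto tag exists yet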
+ echo "$s:$out"
+ done
+}
+
+# get autotag for specs
+# WARNING: This may checkout some files from CVS
+get_autotag() {
+ local pkg spec rpmdir
+
+ rpmdir=$(rpm -E %_topdir)
+ cd $rpmdir
+ for pkg in "$@"; do
+ # strip branches
+ pkg=${pkg%:*}
+ # strip .spec extension
+ pkg=${pkg%.spec}
+ # checkout only if missing
+ if [ ! -e $pkg/$pkg.spec ]; then
+ $rpmdir/builder -g $pkg -ns -r HEAD 1>&2
+ fi
+ if [ ! -e $pkg/$pkg.spec ]; then
+ # just print it out, to fallback to base pkg name
+ echo "$pkg"
+ else
+ autotag $pkg/$pkg.spec
+ fi
+ done
+}
+
+usage() {
+ cat <<EOF
+Usage: make-request.sh [OPTION] ... [SPECFILE] ....
+
+Mandatory arguments to long options are mandatory for short options too.
+
+ --config-file /path/to/config/file
+	Source an additional config file (after $USER_CFG), useful when
+	sending build requests to Ac/Th from the same account
+ -a
+ Try to use latest auto-tag for the spec when building
+ WARNING: This will checkout new files to your packages dir
+ -b 'BUILDER BUILDER ...', --builder='BUILDER BUILDER ...'
+ Sends request to given builders (in 'version-arch' format)
+ --with VALUE, --without VALUE
+ Build package with(out) a given bcond
+ --kernel VALUE
+ set alt_kernel to VALUE
+ --target VALUE
+ set --target to VALUE
+ -s BUILD_ID, --skip BUILD_ID[,BUILD_ID][,BUILD_ID]
+ mark build ids on src builder to be skipped
+ --branch VALUE
+ specify default branch for specs in request
+ -t, --test-build
+ Performs a 'test-build'. Package will be uploaded to hidden .test-builds/
+ ftp tree and won't be upgraded on builders.
+ -r, --ready-build
+	Performs a 'ready' build. Package will be built and uploaded to test/ ftp tree
+ (and later moved by release manager staff to ready/ and main ftp tree)
+ -u, --upgrade
+ Forces package upgrade (for use with -c or -q, not -t)
+ -n, --no-upgrade
+ Disables package upgrade (for use with -r)
+ -ni, --no-install-br
+ Do not install missing BuildRequires (--nodeps)
+ -nd, --no-depend
+ Do not add dependency of build jobs, each job in batch runs itself
+ -j, --jobs
+ Number of parallel jobs for single build
+	-f, --flag VALUE
+	Appends a custom flag to the request
+	-d, --dist DISTRIBUTION_ID
+ Specify value for \$dist
+ -df, --distfiles-fetch[-request] PACKAGE
+ Send distfiles request to fetch sources for PACKAGE
+	-cf, --command-flags
+ Not yet documented
+ -c, --command
+ Executes a given command on builders (prepended to build jobs if build jobs included)
+ -C, --post-command
+ Executes a given command on builders (appended to build jobs if build jobs included)
+ --test-remove-pkg
+ shortcut for --command poldek -evt ARGS
+ --remove-pkg
+ shortcut for --command poldek -ev --noask ARGS
+ --upgrade-pkg
+	shortcut for --command poldek --up; poldek -uv ARGS
+ --pull
+ Updates builders infrastructure (outside chroot)
+ --update-macros
+ Updates rpm-build-macros on src builder
+ -q
+ shortcut for --command rpm -q ARGS
+ -g, --gpg-opts "opts"
+ Pass additional options to gpg binary
+ -p, --priority VALUE
+ sets request priority (default 2)
+ -h, --help
+ Displays this help message
+EOF
+ exit 0
+}
+
+# validate distro, set $dist
+set_dist() {
+ case "$1" in
+ ac)
+ ;;
+ ac-java|ac-xen)
+ ;;
+ ti)
+ ;;
+ ti-dev)
+ ;;
+ th)
+ ;;
+ th-java)
+ ;;
+ aidath)
+ ;;
+ *)
+ die "dist \`$1' not known"
+ ;;
+ esac
+
+ dist=$1
+}
+
+while [ $# -gt 0 ] ; do
+ case "$1" in
+ -d | --dist | --distro)
+ set_dist $2
+ shift
+ ;;
+
+ --config-file)
+ [ -f "$2" ] && . $2 || die "Config file not found"
+ shift
+ ;;
+
+ --builder | -b)
+ for b in $2; do
+ builders="$builders ${b%:*}"
+ done
+ shift
+ ;;
+
+ -a)
+ autotag=yes
+ ;;
+
+ --with)
+ with="$with $(echo "$2" | tr ',' ' ')"
+ shift
+ ;;
+
+ --without)
+ without="$without $(echo "$2" | tr ',' ' ')"
+ shift
+ ;;
+
+ --test-build | -t)
+ build_mode=test
+ f_upgrade=no
+ ;;
+
+ --kernel)
+ kernel=$2
+ shift
+ ;;
+
+ --target)
+ target=$2
+ shift
+ ;;
+
+ -s|--skip)
+ skip="$2"
+ shift
+ ;;
+
+ --branch)
+ branch=$2
+ shift
+ ;;
+
+ --priority | -p)
+ priority=$2
+ shift
+ ;;
+
+ --ready-build | -r)
+ build_mode=ready
+ ;;
+
+ --upgrade | -u)
+ f_upgrade=yes
+ ;;
+
+ --no-upgrade | -n)
+ f_upgrade=no
+ ;;
+
+ --no-depend | -nd)
+ no_depend=yes
+ ;;
+
+ --no-install-br | -ni)
+ flags="$flags no-install-br"
+ ;;
+
+ -j | --jobs)
+ jobs="$2"
+ shift
+ ;;
+
+ -j*)
+ jobs="${1#-j}"
+ ;;
+
+ -v)
+ verbose=yes
+ ;;
+
+ --flag | -f)
+ flags="$flags $2"
+ shift
+ ;;
+
+ --command-flags | -cf)
+ command_flags="$2"
+ shift
+ ;;
+
+ --command | -c)
+ command="$2"
+ if [ "$command" = - ]; then
+ echo >&2 "Reading command from STDIN"
+ echo >&2 "---"
+ command=$(cat)
+ echo >&2 "---"
+ fi
+ shift
+ ;;
+ --post-command | -C)
+ post_command="$2"
+ if [ "$post_command" = - ]; then
+ echo >&2 "Reading post_command from STDIN"
+ echo >&2 "---"
+ post_command=$(cat)
+ echo >&2 "---"
+ fi
+ shift
+ ;;
+ --test-remove-pkg)
+ command="poldek -evt $2"
+ f_upgrade=no
+ shift
+ ;;
+ --remove-pkg)
+ command="for a in $2; do poldek -ev --noask \$a; done"
+ f_upgrade=no
+ shift
+ ;;
+ --upgrade-pkg|-Uhv)
+ command="poldek --up; poldek -uv $2"
+ f_upgrade=no
+ shift
+ ;;
+ -q)
+ command="rpm -q $2"
+ f_upgrade=no
+ shift
+ ;;
+
+ --pull)
+ command_flags="no-chroot"
+ command="git pull"
+ f_upgrade=no
+ ;;
+
+ --update-macros)
+ upgrade_macros="yes"
+ ;;
+
+ -df | --distfiles-fetch | --distfiles-fetch-request)
+ df_fetch=yes
+ ;;
+
+ --gpg-opts | -g)
+ gpg_opts="$2"
+ shift
+ ;;
+
+ --help | -h)
+ usage
+ ;;
+
+ -*)
+ die "unknown knob: $1"
+ ;;
+
+ *:* | *)
+ specs="$specs $1"
+ ;;
+ esac
+ shift
+done
+
+case "$dist" in
+ac)
+ builder_email="builder-ac@pld-linux.org"
+ default_builders="ac-*"
+ default_branch="AC-branch"
+ url="http://ep09.pld-linux.org:1289/"
+ control_url="http://ep09.pld-linux.org/~buildsrc"
+ ;;
+ac-java) # fake "dist" for java available ac architectures
+ builder_email="builder-ac@pld-linux.org"
+ default_builders="ac-i586 ac-i686 ac-athlon ac-amd64"
+ default_branch="AC-branch"
+ url="http://ep09.pld-linux.org:1289/"
+ ;;
+ac-xen) # fake "dist" for xen-enabled architectures
+ builder_email="builder-ac@pld-linux.org"
+ default_builders="ac-i686 ac-athlon ac-amd64"
+ default_branch="AC-branch"
+ ;;
+ti)
+ builder_email="builderti@ep09.pld-linux.org"
+ default_builders="ti-*"
+ url="http://ep09.pld-linux.org:1231/"
+ control_url="http://ep09.pld-linux.org/~builderti"
+ ;;
+ti-dev)
+ builder_email="buildertidev@ep09.pld-linux.org"
+ default_builders="ti-dev-*"
+ url="http://ep09.pld-linux.org:1232/"
+ control_url="http://ep09.pld-linux.org/~buildertidev"
+ ;;
+th)
+ builder_email="builderth@pld-linux.org"
+ default_builders="th-*"
+ url="http://src.th.pld-linux.org:1234/"
+ control_url="http://src.th.pld-linux.org"
+ ;;
+th-java) # fake "dist" for java available th architectures
+ builder_email="builderth@pld-linux.org"
+ default_builders="th-x86_64 th-athlon th-i686"
+ url="http://src.th.pld-linux.org:1234/"
+ ;;
+aidath)
+ builder_email="builderaidath@ep09.pld-linux.org"
+ default_builders="aidath-*"
+ ;;
+*)
+ die "dist \`$dist' not known"
+ ;;
+esac
+
+# need to do this after dist selection
+if [ "$skip" ]; then
+ skip=$(skip="$skip" control_url="$control_url" python -c '
+import urllib2
+import sys
+import StringIO
+import gzip
+import re
+import os
+import string
+from xml.dom import minidom
+
+skip = os.environ.get("skip").split(",");
+control_url = os.environ.get("control_url")
+
+print >> sys.stderr, "* Check queue_id-s against %s" % control_url
+
+try:
+ headers = { "Cache-Control": "no-cache", "Pragma": "no-cache" }
+ req = urllib2.Request(url=control_url + "/queue.gz", headers=headers)
+ f = urllib2.urlopen(req)
+except Exception, e:
+ print >> sys.stderr, "Fetch error %s: %s" % (control_url + "/queue.gz", e)
+ sys.exit(1)
+
+sio = StringIO.StringIO()
+sio.write(f.read())
+f.close()
+sio.seek(0)
+f = gzip.GzipFile(fileobj = sio)
+
+xml = re.compile("(<queue>.*?</queue>)", re.DOTALL).search(f.read()).group(1)
+d = minidom.parseString(xml)
+
+q = []
+for c in d.documentElement.childNodes:
+ if c.nodeName != "group":
+ continue
+ q.append(c.attributes["id"].value)
+
+err = 0
+for s in skip:
+ if s not in q:
+ print >> sys.stderr, "- Check %s: ERROR: Not valid queue-id" % s
+ err = 1
+ else:
+ print >> sys.stderr, "- Check %s: OK" % s
+if err == 1:
+ sys.exit(1)
+print string.join(skip, ",")
+') || exit $?
+ f_upgrade=no
+ build_mode=test
+ priority=-1
+ command="skip:$skip"
+ command_flags="no-chroot"
+ builders="$dist-src"
+fi
+
+branch=${branch:-$default_branch}
+
+specs=`for s in $specs; do
+ case "$s" in
+ ^)
+ # skip marker - pass it along
+ echo $s
+ ;;
+ *.spec:*) # spec with branch
+ basename $s
+ ;;
+ *.spec) # spec without branch
+ echo $(basename $s):$branch
+ ;;
+ *:*) # package name with branch
+ basename $s | sed -e 's/:/.spec:/'
+ ;;
+ *) # just package name
+ echo $(basename $s).spec:$branch
+ ;;
+ esac
+done`
+
+if [ "$autotag" = "yes" ]; then
+ msg "Auto autotag build enabled"
+ specs=$(get_autotag $specs)
+fi
+
+if [ "$df_fetch" = "yes" ]; then
+ df_fetch $specs
+ exit 0
+fi
+
+if [ "$upgrade_macros" = "yes" ]; then
+ command="poldek --up; poldek -uv rpm-build-macros"
+ builders="$dist-src"
+ f_upgrade=no
+ build_mode=test
+fi
+
+if [[ "$requester" != *@* ]] ; then
+ requester="$requester@pld-linux.org"
+fi
+
+if [ -z "$builders" ] ; then
+ builders="$default_builders"
+fi
+
+if [ "$f_upgrade" = "yes" ] ; then
+ flags="$flags upgrade"
+fi
+
+if [ "$build_mode" = "test" ] ; then
+ if [ "$f_upgrade" = "yes" ] ; then
+ die "--upgrade and --test-build are mutually exclusive"
+ fi
+ flags="$flags test-build"
+fi
+
+if [ -z "$build_mode" ] ; then
+	# missing build mode -- builders go crazy if you proceed without one
+ die "please specify build mode"
+fi
+
+if [ -z "$specs" -a -z "$command" ]; then
+ die "no packages to build or command to invoke specified"
+fi
+
+id=$(uuidgen)
+
+gen_req() {
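+	# emit the XML <group> request on stdout; progress messages go through
+	# msg() to stderr, so the caller can keep the two streams apart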
+ echo "<group id='$id' no='0' flags='$flags'>"
+ echo " <time>$(date +%s)</time>"
+ msg "Using priority $priority"
+ echo " <priority>$priority</priority>"
+ if [ -n "$jobs" ]; then
+ msg "Using jobs $jobs"
+ echo " <maxjobs>$jobs</maxjobs>"
+ fi
+ if [ -z "$url" ]; then
+ msg "Using email $builder_email"
+ else
+ msg "Using URL $url"
+ fi
+
+ if [ "$build_mode" = "ready" ]; then
+ msg "Build mode: $(tput setaf 2)$build_mode$c_norm"
+ else
+ msg "Build mode: $(tput setaf 3)$build_mode$c_norm"
+ fi
+
+ msg "Queue-ID: $id"
+ echo
+
+ # job to depend on
+ local depend=
+ local b i=1
+ local name branch builders_xml
+
+ for b in $builders; do
+ msg "Builder: $(red $b)"
+ builders_xml="$builders_xml <builder>$b</builder>"
+ done
+
+ if [ "$command" ]; then
+ bid=$(uuidgen)
+ echo -E >&2 "* Command: $command"
+ echo " <batch id='$bid' depends-on=''>"
+ echo " <command flags='$command_flags'>"
+ echo -E "$command" | sed -e 's,&,\&,g;s,<,\<,g;s,>,\>,g'
+ echo "</command>"
+ echo " <info></info>"
+ echo "$builders_xml"
+ echo " </batch>"
+ depend=$bid
+ fi
+
+ if [ "$f_upgrade" = "yes" ] ; then
+ msg "Upgrade mode: $f_upgrade"
+ fi
+
+ for s in $specs; do
+ # skip marker
+ if [ "$s" = "^" ]; then
+ depend=
+ continue
+ fi
+ if [ "$no_depend" = yes ]; then
+ depend=
+ fi
+ bid=$(uuidgen)
+ echo " <batch id='$bid' depends-on='$depend'>"
+
+ name=$(echo "$s" | sed -e 's|:.*||')
+ branch=$(echo "$s" | sed -e 's|.*:||')
+ msg "Adding #$i $name:$branch${kernel:+ alt_kernel=$kernel}${target:+ target=$target}${depend:+ depends on $depend}"
+ echo " <spec>$name</spec>"
+ echo " <branch>$branch</branch>"
+ echo " ${kernel:+<kernel>$kernel</kernel>}"
+ echo " ${target:+<target>$target</target>}"
+ echo " <info></info>"
+ echo
+ for b in $with; do
+ echo " <with>$b</with>"
+ done
+ for b in $without; do
+ echo " <without>$b</without>"
+ done
+ echo
+ echo "$builders_xml"
+ echo " </batch>"
+ i=$((i+1))
+
+ # let next job depend on previous
+ depend=$bid
+ done
+
+ if [ "$post_command" ]; then
+ bid=$(uuidgen)
+ if [ "$no_depend" = yes ]; then
+ depend=
+ fi
+ echo -E >&2 "* Post-Command: $post_command"
+ echo " <batch id='$bid' depends-on='$depend'>"
+ echo " <command flags='$command_flags'>"
+ echo -E "$post_command" | sed -e 's,&,\&,g;s,<,\<,g;s,>,\>,g'
+ echo "</command>"
+ echo " <info></info>"
+ echo "$builders_xml"
+ echo " </batch>"
+ depend=$bid
+ fi
+
+ echo "</group>"
+}
+
+gen_email () {
+ # make request first, so the STDERR/STDOUT streams won't be mixed
+ local tmp req
+ tmp=$(mktemp)
+ gen_req > $tmp
+
+ if [ "$verbose" = "yes" ]; then
+ cat $tmp >&2
+ fi
+
+ cat <<-EOF
+ From: $requester
+ To: $builder_email
+ Subject: build request
+ Message-Id: <$id@$(hostname)>
+ X-New-PLD-Builder: request
+ X-Requester-Version: \$Id$
+
+ EOF
+
+ gpg --clearsign --default-key $default_key $gpg_opts --output=- $tmp
+ rm -f $tmp
+}
+
+gen_email | send_request
--- /dev/null
+#!/bin/sh
+set -e
+
+dir=$(cd "$(dirname "$0")"; pwd)
+rpmdir=$(rpm -E %_topdir)
+dist=th
+
+pkgs_head="
+ dahdi-linux
+ e1000e
+ igb
+ ipset
+ ixgbe
+ linuxrdac
+ lirc
+ madwifi-ng
+ open-vm-tools
+ r8168
+ VirtualBox
+ xorg-driver-video-fglrx
+ xorg-driver-video-fglrx-legacy-12.x
+ xorg-driver-video-nvidia
+ xtables-addons
+"
+
+pkgs_longterm="
+ iscsitarget
+ openvswitch
+ xorg-driver-video-nvidia-legacy3
+"
+
+# autotag from rpm-build-macros
+# displays latest used tag for a specfile
+autotag() {
+ local out spec pkg
+ for spec in "$@"; do
+ # strip branches
+ pkg=${spec%:*}
+ # ensure package ends with .spec
+ spec=${pkg%.spec}.spec
+		# pkg name without the subdir prefix
+		pkg=${pkg#*/}
+		# and without the .spec extension
+		pkg=${pkg%%.spec}
+ out=$(cvs status -v $spec | awk "!/Sticky/&&/auto-$dist-$pkg-$alt_kernel/{if (!a++) print \$1}")
+ echo "$spec:$out"
+ done
+}
+
+get_last_tags() {
+ local pkg spec
+
+ echo >&2 "Fetching package tags: $*..."
+ for pkg in "$@"; do
+ echo >&2 "$pkg... "
+ if [ ! -e $pkg/$pkg.spec ]; then
+ $rpmdir/builder -g $pkg -ns -r HEAD 1>&2
+ fi
+ if [ ! -e $pkg/$pkg.spec ]; then
+ # just print it out, to fallback to base pkg name
+ echo "$pkg"
+ else
+ spec=$(autotag $pkg/$pkg.spec)
+ spec=${spec#*/}
+ echo >&2 "... $spec"
+ echo $spec
+ fi
+ done
+}
+
+cd $rpmdir
+case "$1" in
+ head)
+ kernel=$(get_last_tags kernel)
+ kernel=$(echo ${kernel#*auto-??-} | tr _ .)
+ specs=""
+ for pkg in $pkgs_head; do
+ echo >&2 "Rebuilding $pkg..."
+ $rpmdir/builder -g $pkg -ns
+ $rpmdir/relup.sh -m "rebuild for $kernel" -ui $pkg/$pkg.spec
+ specs="$specs $pkg.spec"
+ done
+ $dir/make-request.sh -nd -r -d $dist $specs
+ ;;
+ longterm)
+ kernel=$(alt_kernel=longterm get_last_tags kernel)
+ kernel=$(echo ${kernel#*auto-??-} | tr _ .)
+ specs=""
+ for pkg in $pkgs_longterm; do
+ echo >&2 "Rebuilding $pkg..."
+ $rpmdir/builder -g $pkg -ns
+ $rpmdir/relup.sh -m "rebuild for $kernel" -ui $pkg/$pkg.spec
+ specs="$specs $pkg.spec"
+ done
+ # first build with main pkg (userspace), later build from tag
+ $dir/make-request.sh -nd -r -d $dist --without kernel $specs
+
+ specs=$(get_last_tags $pkgs_head $pkgs_longterm)
+ $dir/make-request.sh -nd -r -d $dist --kernel longterm --without userspace $specs
+ ;;
+ *)
+ # try to parse all args, filling them with last autotag
+ while [ $# -gt 0 ]; do
+ case "$1" in
+ --kernel|--with|--without)
+ args="$1 $2"
+ shift
+ ;;
+ -*)
+ args="$args $1"
+ ;;
+ *)
+ specs="$specs $1"
+ ;;
+ esac
+ shift
+ done
+ specs=$(get_last_tags $specs)
+ $dir/make-request.sh -nd -r -d $dist $args $specs
+ ;;
+esac
--- /dev/null
+#!/bin/sh
+set -e
+
+dist=th
+
+dir=$(dirname $0)
+
+$dir/make-request.sh -d $dist -r -a "$@"
--- /dev/null
+#! /usr/bin/python
+
+# As omta segfaults and the rest is so huge and doesn't work out of the box,
+# set mailer="./smtpwrapper.py" (or whatever the path is) in ~/.requestrc
+
+smtp_host = "beauty.ant.vpn"
+
+import smtplib,sys
+
+msg = sys.stdin.read()
+
+server = smtplib.SMTP(smtp_host)
+# server.set_debuglevel(1)
+server.sendmail("matkor@pld-linux.org","builder-ac@pld-linux.org", msg) # Addresses should be taken from .requestrc
+server.quit()
+
--- /dev/null
+#
+# Access Control Lists for PLD builder
+#
+# Format:
+#
+# [login]: login of user, used in messages
+#
+# gpg_emails: list of emails used in GPG signatures
+# Message is considered to be sent by this user if *any* of emails
+# listed is seen in "gpg: Good signature from ...<email>"
+# It is therefore important not to add any fake signatures to
+# keyring.
+#
+# mailto: where to send status notification for this user.
+# If not present gpg_emails[0] is assumed.
+#
+# privs: list of privileges. The list is scanned left to right; if no
+#	match is found, access is denied. A ! before an item denies access.
+#	Items have the format <what-action>:<which-builder>[:<what-branch>],
+#	where all three are shell wildcards and by default any branch is allowed.
+#	<what-branch> may only be specified for: src, binary, ready and upgrade
+#
+# Actions:
+# src -- build src rpm (only makes sense if <which-builder> is src
+# builder)
+# binary -- build binary rpm
+# notify -- can send notification about build process on given builder.
+# Used in entries for binary builders.
+# ready -- can start non-test builds
+# upgrade -- can start package upgrades
+# <number> -- minimum (highest) priority allowed (default: 10)
+#
+# Example:
+# Allow access for binary builders, except for security builders.
+# Also allow building src rpms (without it binary:* wouldn't make much
+# sense). The lowest priority allowed will be 3.
+#
+# [foo]
+# gpg_emails = foo@pld-linux.org Foo.Bar@blah.com
+# mailto = foo-builder@blah.com
+# privs = src:src !binary:security-* binary:* 3:*
+#
+
+[bin_builder_roke]
+gpg_emails = bin_builder@roke.freak
+privs = notify:*
+
+[src_builder_roke]
+gpg_emails = srpms_builder@roke.freak
+privs = sign_queue:* notify:*
+
+[malekith]
+gpg_emails = malekith@pld-linux.org
+mailto = malekith@roke.freak
+privs = src:roke-src binary:roke-*:AC-branch
--- /dev/null
+[all]
+# Builder email (put in From: field when this builder is sending mails).
+email = builder-th@pld-linux.org
+
+# Admin's email (she gets mails about errors and such).
+admin_email = th-admin@pld-linux.org
+
+# To be placed in Cc: of status and error reports (a mailing list usually).
+builder_list =
+
+# List of binary builders. This thing must be present on both src and bin
+# builders.
+binary_builders = th-athlon
+
+# Name of the src builder (this field is ignored on binary builders).
+src_builder = th-src
+
+# Logs always go to spool/log, if you want syslog in addition,
+# specify facility here (user or local0-7 is the best choice probably).
+# If you don't want syslog, leave it empty.
+syslog =
+
+# Wrapper command for sudo chroot (used eg on sparc).
+#sudo_chroot_wrapper = sparc32
+sudo_chroot_wrapper =
+
+# Keep rpms in /spools/ready inside chroots for how long (in hours).
+max_keep_time = 168
+
+# TCP port number for request_handler_server.py. Ignored on bin builders.
+request_handler_server_port = 1234
+
+# Send '*.uploadinfo' files along with built rpms (used by pld-ftp-admin),
+# but probably not useful for small, 'personal' builders. [yes/no]
+gen_upinfo = yes
+
+# CIA bot (see cia.navi.cx).
+#bot_email = cia@pld-linux.org
+
+# ------ Binary builders config:
+# How many jobs can be run at once (how many concurrent builds).
+job_slots = 1
+
+# Maximal load, above which builds won't get started.
+max_load = 10.0
+
+# make -jX for single build
+max_jobs = 1
+
+# Nice control.
+#nice = 0
+
+# Where to look for queue.gz and counter (published through www by src builder).
+control_url = http://src.th.pld-linux.org/
+
+# Src builder's email for sending status notifications (build ok/failed).
+#notify_email = builder-th-src@pld-linux.org
+
+# Where to locally cache rpms from binary builders
+rpm_cache_dir = /spools/ready
+
+# ------ SRC builder config:
+# List of name-version-release tag prefixes.
+#tag_prefixes = auto- auto-ac- auto-th-
+tag_prefixes = auto-th-
+
+# ------ Configs for particular builders:
+
+[th-src]
+arch = i686
+chroot = /home/pld/builderth/chroots/chroot-src/
+buildlogs_url = rsync://blogs-th@buildlogs.pld-linux.org/pld-buildlogs-th-SRPMS/
+ftp_url = scp://pldth@ep09.pld-linux.org:ftp/.incoming/SRPMS/
+notify_url = http://src.th.pld-linux.org:1234/
+test_ftp_url = scp://pldth@ep09.pld-linux.org:ftp/.test-builds/SRPMS/
+rpmqa_url = scp://pldth@ep09.pld-linux.org:ftp/.stat/builder/th/
+rpmqa_filename = rpmqa-SRPMS.txt
+
+[th-athlon]
+arch = athlon
+chroot = /home/users/builderth/chroot-athlon/
+buildlogs_url = /dev/null
+notify_url = http://src.th.pld-linux.org:1234/
+ftp_url = scp://pldth@ep09.pld-linux.org:ftp/.incoming/athlon/
+test_ftp_url = /dev/null
+rpmqa_url = /dev/null
+rpmqa_filename = nothing
+
+
--- /dev/null
+# This file lists passwords to rsync service on given
+# user@host.
+# Example entry:
+# foo@bar.com SeCr3t
+
+bl@roke.freak foobar123
--- /dev/null
+0. ABSTRACT
+
+Below is a description of the way the builder infrastructure works. You should
+read it in order to understand what exactly is going on when you try to build
+a package.
+
+(It assumes you have a basic understanding of how RPM packages are created.)
+
+1. OVERVIEW
+
+The builder infrastructure is divided into three parts:
+- the client
+- the source builder
+- the binary builders
+
+The basic idea is to allow developers (requesters) to easily specify which
+package they want built, and have the rest of the system take care of building
+and upgrading packages, uploading them where appropriate and reporting back to
+the developers on the results (were there errors, how long it took to build,
+etc.).
+
+What follows is a more detailed description of what each part is supposed to
+do.
+
+1.1 NOTES ON UPLOADING
+
+When we talk about uploading packages and logs, we mean placing them somewhere
+on the local system (via a simple 'cp'; this also includes copying to
+/dev/null), or placing them on a remote system via the scp or rsync
+protocols.
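+
+The destination is always expressed as a url; the supported forms are (the
+paths here are placeholders):
+
+    rsync://user@host/module/some/path/file
+    scp://user@host/some/path/file
+    /absolute/path/to/file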
+
+1.2 TOOLS USED
+
+99% of the system is written in Python, with the remaining parts being shell
+scripts (POSIX shell is assumed; bashisms are to be shot on sight and their
+authors beheaded in a public display). The presence of a few helper tools is
+also assumed, namely the gpg, rsync and scp command line utilities.
+
+2. SHORT DIAGRAM
+
+Below are the bare basics of how data is tossed around. Only the most relevant
+transfers are presented.
+
+ status info status info
+ +--------------+<--+------------------+
+ | build | | |
+ V request | V src.rpm |
+requester ---------> src.builder ---------> bin.builders
+ | |
+ | arch.rpms |
+ FTP server <---+------------------+
+
+3. CLIENT
+
+Main task: sending build requests to source builders.
+
+This is the simplest part. It consists of a shell script (make-request.sh),
+which, based on the way it's configured and on the cmdline parameters given to
+it, generates an xml-formatted build request, which it then signs with the
+requester's PGP key (using the gpg utility) and then sends it to the source
+builder via a sendmail compatible command line application (by default invoking
+"sendmail -t").
+
+Two modes of operation are:
+- sending requests to build chosen package(s) on a specified group of builders
+- sending a chosen command to be executed on a specified group of builders
+
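+For instance (a sketch only -- the spec names are made up; the options are
+those documented in make-request.sh's usage text):
+
+    make-request.sh -d th -r foo.spec                  # 'ready' build of foo
+    make-request.sh -d th -t -b 'th-x86_64' foo.spec   # test build, one builder
+    make-request.sh -d th -t -c 'rpm -q glibc'         # run a command on builders
+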
+See HOWTO-CLIENT for a hands-on approach to setting yourself up for being able
+to send out build requests.
+
+4. SOURCE BUILDER (AKA SRC.BUILDER)
+
+Main task: building src.rpm packages and serving them to binary builders.
+Additional tasks: reporting back to the requester about the status of the build
+and uploading generated src.rpm packages where appropriate. Also -- tagging,
+access control, uploading buildlogs and keeping the 'queue.html' file in sync.
+
+The source builder is the central place for managing access rights for
+requesters. Upon receiving a request (via email), it first checks the pgp
+signature with which the request was signed against the pgp key currently
+assigned to the requester (all done with the 'gpg' command). After verifying
+that the requester name and pgp signature match, it then checks whether the
+requester is actually allowed to perform the actions she wants to perform.
+That is: does she have access to the specified binary builders, is she allowed
+to set the 'upgrade' flag, is she allowed to use the priority she set for the
+request, and is she even allowed to request the build of the given
+package.
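+
+As an illustration, a (made up) acl.conf entry granting rights like these
+might look as follows (the format is described in acl.conf's comments):
+
+    [joe]
+    gpg_emails = joe@pld-linux.org
+    privs = src:th-src binary:th-* 3:*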
+
+Once a request passes all verifications, the src.builder uses its own chroot
+equipped with a basic build environment to fetch the sources of a given
+package, tag them if configured to do so and create a src.rpm out of them (all
+done by invoking the 'builder' script as found in the SPECS module of the PLD
+cvs repository (see http://www.pld-linux.org/Repositories for details)).
+
+After finishing the build procedure the src.builder reports (via email) to the
+requester on the status of the build (OK/FAIL), updates the 'queue.html' file
+with that information (urls for PLD2.0 and 3.0 queues:
+http://ep09.pld-linux.org/~buildsrc/queue.html and
+http://src.th.pld-linux.org/queue.html) and, if configured to do so,
+uploads the complete log of the whole procedure (called the buildlog)
+somewhere, most likely to our PLD buildlogs system
+(http://buildlogs.pld-linux.org).
+
+If the build procedure was successful, the resulting src.rpm file is most
+likely uploaded somewhere (if configured to do so; in our case it's our FTP
+server) and, most importantly, it is served via an http server for
+the binary builders to download. An xml-formatted (and pgp signed) 'queue.gz'
+file and a small 'max_req_no' file are also updated with relevant information, in
+order for the bin.builders to be able to notice new requests and figure out
+which are addressed to which builders.
+
+The last matter taken care of by the src.builder is receiving (gpg signed)
+emails from bin.builders with status information regarding what happened to
+various builds (whether they succeeded or failed) and updating the 'queue.html'
+file accordingly.
+
+See the HOWTO-BUILDERS file for a hands-on approach to setting up builders. All
+of the technical details are there.
+
+5. BINARY BUILDERS (AKA BIN.BUILDERS)
+
+Main task: building arch.rpm packages and uploading them where appropriate.
+Additional tasks: reporting back to the requester and the source builder on the
+status of the build. Also -- uploading buildlogs.
+
+Upon noticing that a new request has been published by the src.builder and
+verifying the appropriate gpg signatures, binary builders download the relevant
+src.rpm files and use their build-environment-equipped chroots to start
+building them (by invoking the 'rpmbuild' command). Once a build is completed
+(meaning arch.rpm files are produced, or the whole procedure is interrupted for
+whatever reason), the status of the build (OK/FAIL) is sent via email both to
+the requester and to the src.builder.
+
+The procedure for uploading the produced arch.rpms and buildlogs is the same
+as for the src.builder.
+
+Bin.builders however need to deal with one thing src.builders are mostly not
+affected by -- managing the build environments inside their chroots. That means
+installing new packages if required by a given build (using the 'poldek'
+package manager; http://poldek.pld-linux.org/), upgrading older packages for
+much the same reasons, downgrading from time to time and locally caching newly
+built files for short periods of time.
+
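+In practice this boils down to poldek invocations along the lines of the
+following sketch (illustrative; the scripts' exact commands may differ):
+
+    poldek --up                        # refresh the package indexes
+    poldek -Q --shcmd 'upgrade -G *'   # upgrade the build environment
+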
+All of this, along with lots of quirks and obscure details is explained in the
+HOWTO-BUILDERS file.
+
+6. Q/A
+
+Q: What's this 'tagging' all about with the src.builder?
+A: When a src.rpm is created and arch.rpms built based on it, it is sometimes
+useful to be able to check which sources were used to build them. Casual users
+might be satisfied with just extracting everything from the src.rpm (that is
+what they are uploaded to the ftp server for), but the old ones aren't always
+available and even if they are, developers require source control in order to
+work efficiently. Hence all sources used to build a given src.rpm for a given
+line are tagged appropriately in the PLD CVS repository, for example trying to
+build cthulu.spec on the 2.0 (codename Ac) line of our distro might result in a
+tag looking something like this being set -- 'auto-ac-cthulu-6.6-6'.
+(Technically the previous sentence is not 100% correct due to (a) the way our
+DISTFILES infrastructure impacts which sources are and aren't in the CVS repo
+and hence are subject to tagging and (b) specific policies regarding tagging
+for each distro line; for the latter refer to appropriate policies.)
+
+
+# vi: formatoptions=aw expandtab
--- /dev/null
+Note: this was supposed to be a project for my uni, hence the structure. Still, it might contain some useful information.
+
+
+
+Design, Implementation And Future Development Of The PLD Builder Infrastructure
+
+Abstract
+
+The following paper presents the current state of, and future plans regarding, the PLD Linux Distribution's builder infrastructure, that is, the set of software responsible for automatically producing binary RPM packages and for maintaining the build environments required to build them.
+
+1 Introduction
+
+As is the case with most open source projects, the PLD Linux Distribution (PLD in short; yes, that's a recursive acronym) has many times more work to get done than it has developers with time available. This holds especially true for the jobs of no immediate, direct benefit to developers. While updating a particular piece of software seems worthwhile to most (mostly because they actually have a need for the new and/or patched version), the same does not hold true for maintaining the distribution's infrastructure or taking part in the various activities necessary to actually release a new version of PLD. And while it's of course bad when some important part of the infrastructure is down or the current release is behind schedule, it's another matter entirely to find people willing to commit to maintaining those on a daily basis.
+
+People with enough knowledge and the will to take care of those things are hard to find and their time is of extremely high value. That is why it's very important not to waste it. The ultimate goal is to automate all of the activities where it is reasonable to do so (including coding extended fault-tolerance into the infrastructure), so as to enable developers to spend their time on actual development rather than on infrastructure housekeeping.
+
+1.1 Glossary
+
+PLD Linux Distribution -- an RPM-based Linux distribution originally started in Poland. Website available at http://pld-linux.org.
+
+RPM Package Manager -- one of the two most popular package management systems used by leading Linux distributions (the other one being DEB). Basic features include distributing binary software (that is, software in an already compiled, ready-to-run form), installing, uninstalling and upgrading that software, and keeping extensive metadata (file listings, md5 hashes of all the files, info on other applications and libraries it might depend on, etc.) about it while it's present in the system.
+
+RPM Package -- a file (usually) containing an application, which then can be installed (using the RPM package manager) and run.
+
+Poldek -- a high level shell around the RPM package manager originally developed for and used by PLD.
+
+Builder -- a machine containing a properly set up build environment (compilers, libraries, etc.) and a set of management scripts used for creating RPM packages with as little human interaction as is possible.
+
+2 First Generation Builder Infrastructure Design
+
+Mostly shell scripts. TODO SOME RESEARCH ON HISTORY(?)
+
+3 Second Generation Builder Infrastructure Design
+
+Originally written by Michal Moskal during the summer of 2003, it was a big step forward over the previous design. Written in Python, it introduced a lot of functionality that drastically reduced the amount of work required to maintain and make use of builders.
+
+Over the years it has seen a steady stream of improvements with some major changes taking place during the winter of 2004/2005, when the new FTP Administration Infrastructure was being written.
+
+On a high level, all of the systems that are currently used to actually build, test and distribute packages consist of the following:
+
+- The CVS repository along with the distfiles server
+- The buildlogs server
+- The FTP administration infrastructure
+- The builder infrastructure
+
+3.1 The CVS Repository With The Distfiles Server And The Buildlogs Server
+
+The CVS repository along with the distfiles server are just places where application sources along with various patches and so called spec files reside (a spec file is basically a recipe telling the RPM software how to create an RPM package). 99% of the time builders just fetch files from them (the remaining 1% is of no interest here).
+
+PLD's CVS repository can be found at http://cvs.pld-linux.org.
+
+The buildlogs server is just a place where logs of the builds performed on builders reside. It's useful from time to time to be able to go through the whole process, usually during debugging (e.g. when a given application somehow got built with a different set of build flags than what a developer was expecting). 100% of the time builders just upload simple text files (the buildlogs) there.
+
+The buildlogs server can be reached at http://buildlogs.pld-linux.org.
+
+Both of the above systems are beyond the scope of this document and no changes to them are currently planned.
+
+3.2 FTP Administration Infrastructure
+
+The currently used version was written around 2004/2005 in Python. As the name suggests, it's a set of scripts used for managing a large number of RPM packages (which *are* the distribution itself) on the PLD's FTP server. It is here, using those scripts, that decisions are made regarding which packages get dropped, which packages replace their older versions and which should (at least for the time being) be made available only to (willing) testers to download.
+
+The only interaction between the builders and the FTP admin infrastructure is that the former, after a successful build, upload the resulting RPM packages to the FTP server, so that the admin scripts can notice the new files and take appropriate actions.
+
+No changes to this part of the infrastructure are currently planned either.
+
+3.3 The Builder Infrastructure
+
+TODO
+Also: read ARCHITECTURE file.
+
+3.4 Summary
+
+In short, the build process looks like this:
+- A developer sends a request to the source builder to build package XYZ.
+- The source builder fetches necessary sources from the CVS repository and the distfiles server and pushes them to the binary builders (in the form of a source RPM package).
+- Both source and binary builders upload their resulting RPM packages to the FTP server, their buildlogs to the buildlogs server and inform the developer about what happened with his request via email. Additionally binary builders inform the source builder about what happened to the build they've just performed.
+
+4 Third Generation Builder Infrastructure Design
+
+The third generation design is more of an evolution than a revolution (contrary to the shift away from the primitive first versions). Most of the code will remain the same; however, some fundamental changes to the low level communication protocols and metadata storage solutions will take place, which will result in the overall system being more robust and reliable and having a lot more flexibility (and a nice GUI).
+
+4.1 Communication Protocol Changes
+
+Switching from talking via emails to talking via xmlrpc over https. TODO WRITE MORE + RATIONALE
+
+4.2 Metadata Storage Changes
+
+Within the builder infrastructure, the source builder acts as the central hub. It receives build requests from developers, it fetches the sources necessary to build a package, it pushes those sources to the binary builders and receives status reports from them. It's obvious that the flexibility of the whole system depends largely on what the source builder is capable of.
+
+Unfortunately, as was explained in chapter 3.3, the current solution isn't suitable for anything advanced. Hence the new design.
+
+4.2.1 Database Layout
+
+What follows are the SQL queries used to create the database.
+
+[TODO]
+
+4.3 Summary Of Changes
+
+- Switch from using emails to xmlrpc over https for any type of builder communication.
+- Design a robust, extensible database for storing source builder metadata.
+- Alter the source builder to use that database as its backend.
+- Write a (PHP based) web interface for manipulating data in the aforementioned database.
+- Extend binary builders to take better advantage of source builder's new functionality.
+
+
+
--- /dev/null
+Roadmap:
+
+ Makefile -- nothing interesting.
+
+	PLD_Builder/ -- all python stuff lives here.
+
+ admin/ -- scripts for chroot admin.
+
+	architektura.txt -- architecture walkthrough docs.
+
+ client/ -- scripts for clients, i.e. developers sending requests
+
+ config/ -- configuration
+
+ go.py -- testing script, don't touch
+
+ bin/ -- scripts for builder account outside chroot
+
+ request-handler.sh
+	Script to be run from procmail on src builder. It processes the incoming
+	request, authorizes it and stores it in spool/queue.
+
+ src-builder.sh
+	Script to be run from cron on src builder. It looks for new
+	requests in spool/queue, takes them from there, builds an SRPM, and
+	stores the request for binary builders in spool/req_queue (which is
+	mirrored in the www/ directory, signed and compressed). SRPMs and
+	buildlogs are queued for transmission.
+
+ request-fetcher.sh
+ Run on binary builder.
+	Fetches queue.gz from the src builder and distributes requests to all
+	builders hosted on the given account (into spool/queue-<builder> files).
+
+ rpm-builder.sh <builder>
+ Run on binary builder.
+	Tries to acquire locks for <builder> and for a job slot. If that succeeds,
+	it processes one request from spool/queue-<builder>.
+
+ load-balancer.sh
+ Run on binary builder.
+	Checks spool/got_lock, then runs rpm-builder.sh in the order determined
+	by this file (if b1 held the lock before b2 last time, it first runs
+	rpm-builder.sh b1 and then rpm-builder.sh b2), so that builders get
+	the same number of requests.
+
+ file-sender.sh
+ Run on both binary and src builder.
+ Sends files queued in spool/{ftp,buildlogs}.
+
+Working directories:
+ lock/
+ spool/
+ log/
--- /dev/null
+new bin builder setup
+
+packages and chroot
+~~~~~~~~~~~~~~~~~~~
+1. install pld-builder from the th repository on the target host
+
+2. create chroot /srv/chroot
+(you can symlink it to the real destination for the sake of copy-pasting from here)
+mkdir -p /srv/chroot
+
+- if you're using rpm < 4.5-29, link the system rpmdb to the target rpmdb,
+  as rpm otherwise makes a mess of it. See LP#395177.
+mv /var/lib/rpm{,-host}
+ln -s /srv/chroot/var/lib/rpm /var/lib/rpm
+install -d /srv/chroot/var/lib/rpm
+rpm -r /srv/chroot --initdb
+
+Otherwise it's just:
+rpm -r /srv/chroot --initdb
+
+- install the distro gpg key, as by default th packages are signed and signature verification is enabled in the config:
+rpm -r /srv/chroot --import /etc/pki/rpm-gpg/PLD-*.asc
+
+- setup minimal /dev
+install -d /srv/chroot/dev
+cp -a /dev/{full,null,random,urandom,zero} /srv/chroot/dev
+
+- install vserver-packages; as it is usually hidden, you must unhide it
+  with --noignore:
+poldek -r /srv/chroot -u vserver-packages -Q --noignore
+
+- install pld-builder-chroot from th repos
+poldek -r /srv/chroot -u pld-builder-chroot -Q --sn th --sn th-ready
+
+- make the rpmdb readable for the builder user
+chmod -R a+rX /srv/chroot/var/lib/rpm
+
+- set up /srv/chroot/etc/resolv.conf so that you can work with poldek if you enter the chroot manually
+cat /etc/resolv.conf > /srv/chroot/etc/resolv.conf
+
+- restore rpmdb hack
+rm -f /var/lib/rpm
+mv /var/lib/rpm{-host,}
+
+gpg keys
+~~~~~~~~
+1. import src builder key to bin builder so it can download queue.gz
+
+src-builder$ gpg --export builder-th-src@pld-linux.org --armor > th-src.asc
+bin-builder$ gpg --import < th-src.asc
+
+2. generate a new key for the bin builder:
+
+bin-builder$ gpg --gen-key
+
+3. import that public key to the src builder keyring, so it can accept
+   spool/notify messages:
+
+bin-builder$ gpg --export KEYID --armor > th-i686.asc
+src-builder$ gpg --import < th-i686.asc
+
+ssh keys
+~~~~~~~~
+
+generate key on bin builder and add it to authorized_keys of ftp account
+
+i.e. the account where you push your uploads:
+[th-i686]
+ftp_url = scp://fpldth@ep09.pld-linux.org:ftp/.tree/.incoming/i686/
+
+bin-builder$ ssh-keygen
+bin-builder$ ssh-copy-id -i .ssh/id_rsa.pub fpldth@ep09.pld-linux.org
+
+buildlogs
+~~~~~~~~~
+buildlogs are copied with rsync. Ask the buildlogs.pld-linux.org admin to
+allow your IP. You also need to set up the password used for rsync
+authentication in rsync-passwords.
+
+sudo access
+~~~~~~~~~~~
+make sure the builder user (who runs the crons) can sudo chroot into the chroots:
+builder ALL=(ALL) NOPASSWD: /usr/sbin/chroot /home/users/builder/chroot-th *
+
+testing
+~~~~~~~
+
+keep an eye on /var/lib/pld-builder/spool/log with tail -f and
+run the cronjobs under the builder account.
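+
+For example, to exercise a single fetch cycle by hand (paths as used in the
+packaged crontab; adjust them to your setup):
+
+tail -f /var/lib/pld-builder/spool/log &
+su - builder -c /usr/share/pld-builder/bin/request-fetcher.sh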
--- /dev/null
+- Write a spec preprocessor that processes a given spec file and expands
+  macros. Next we should process the output, look for all BuildRequires:
+  lines, and install them, instead of running rpmbuild --nobuild to see
+  what's missing, since that chokes when stuff is used in %(...), on missing
+  includes and so on.
+
+ get_br.py is close, but doesn't handle:
+ %if foo
+ BuildRequires: ...
+ %endif
+  which in fact requires an expression parser :<
+
+- implement:
+ <command type='shell' id='...'>shell code</command>
+ <command type='upgrade' id='...' /> (run poldek --up; poldek -Q --shcmd 'upgrade -G *')
+  The first requires command-shell:<builder>, the second command-upgrade:<builder>.
+  This should be a class in requester.py, with kind='command'; also look for
+  kind checking elsewhere in the code (grep for 'kind.*group')
+
+- make default configs reasonable
+
+- fix make-request.sh to be more user-friendly and configurable
+
+- add possibility of tagging stuff for source builder
+
+- jabber daemon. It should connect to the jabber server and log in to a
+  conference room, then open a unix socket, so other scripts can write to
+  it, and the daemon forwards that to the conference room.
+
+  The next step for such a daemon would be to create a second unix socket,
+  where scripts log what they are doing, so we can do things like tail the
+  current buildlog.
+
+- add log.debug (log only with special options) to log.py, and use it
+
+- if a magic comment is found in the spec: single-build -- acquire all job-slots
+
+- allow blocking some packages
+
+- fetch src.rpm once for all builders on this account; separate fetching of
+  src.rpms, so we can build and fetch at the same time
+
+- unify buildlogs.py and ftp.py, both are file queues
+
+- ability to just copy src.rpm from ftp and make it the base for a request to
+ bin builders
--- /dev/null
+1. The developer sends a request, using client/make-request.sh, to the
+srpm builder's address.
+
+2. On the srpm builder account, the request_handler.py script, invoked from
+   procmail, handles the request.
+	a) it checks the gpg signature and extracts all "Good signature
+	   from <...>" lines; if there are none -- bail out
+	b) it looks in its acl.conf to see whether the person from "Good
+	   signature from" may do anything at all; if not -- bail out
+	c) it XML-parses the request (request.py)
+		i. if it is a <notification ...>, it checks the
+		   notify:<builder> privilege and, if OK, updates the
+		   spool/req_queue queue accordingly. If all builders have
+		   already finished building a given group, the src rpms are
+		   removed from www/srpms/<group-id>/. It generates the
+		   statistics page (www/queue.html).
+		ii. if it is a <group ...>, it checks whether the user who
+		    sent the request has the src:<src-builder-name> privilege,
+		    plus binary:<builder> for every builder the request is
+		    addressed to. If OK, it puts the request into spool/queue.
+
+3. On the srpm builder account, the srpm_builder.py script runs from cron.
+	a) It reads spool/queue; if there are any requests, it sorts them by
+	   priority (lower number == more important request), then by arrival
+	   time (older == more important), takes them off the queue and
+	   writes the queue back.
+	b) It handles only <group ...>.
+	c) It builds all packages from the group in the chroot, queuing files
+	   in spool/ftp/ and spool/buildlogs/. Additionally, the srpms are put
+	   into www/srpms/<group-id>/, from where the bin builders fetch them.
+	d) if no package built successfully -- bail out
+	e) the request is assigned a number
+	f) the request is put into spool/req_queue
+	g) the queue is signed with the srpm builder's key, gzipped and put
+	   into www/queue.gz
+	h) the number is written to www/max_req_no
+	i) the statistics page is generated
+
+4. On both the srpm builder and the bin builder accounts, file_sender.py
+   runs. It monitors the spool/{buildlogs,ftp} queues. They contain files
+   such as:
+
+	faa1f592-437f-446d-b1e6-ac41976c5775
+	faa1f592-437f-446d-b1e6-ac41976c5775.info
+	faa1f592-437f-446d-b1e6-ac41976c5775.desc
+
+   The .desc file is the control file for file_sender.py. It contains the
+   requester's email (for alerting), the time of queuing (files are sent
+   exactly in the order they were put into the queue), and the destination
+   (url) the file should be sent to.
+
+   The .info file exists only for buildlogs. If such a file exists, it is
+   sent after the proper file (the one without an extension). It records
+   the buildlog status (OK|FAIL). helpers/buildlogs-mover.sh uses these
+   files.
+
+   The .info and .desc files end with a line containing the word END. The
+   scripts do nothing with them if that word is missing (unfinished
+   transmission).
+
+   The urls supported as destinations are:
+
+	rsync://user@host/module/some/path/file
+	scp://user@host/some/path/file
+	/absolute/path/to/file
+
+   The config/rsync-passwords file contains the rsync passwords, in the
+   format:
+
+	user@host password
+
+   scp works with keys (from ~/.ssh)
+
+5. On the bin builder account, the request_fetcher.py script runs.
+	a) it fetches $control_url/max_req_no and compares it with
+	   spool/last_req_no; if they are the same -- bail out.
+	b) it fetches $control_url/queue.gz, decompresses it and checks the
+	   signature (config/acl.conf must grant "sign_queue:all" to the
+	   signing user) [sidenote: the bin builder account needs no gpg keys
+	   other than its own and the srpm builder's, nor does it need a full
+	   acl.conf -- just the srpm_builder entry with sign_queue:all]
+	c) it puts the requests into spool/queue
+	d) it writes the highest number of a queued request to
+	   spool/last_req_no.
+
+6. On the bin builder account, the rpm_builder.py script runs.
+	a) it checks the load; if too high -- bail out
+	b) it locks build-slot-N, where N < job_slots; if that fails --
+	   bail out
+	c) it locks building-rpm-for-<builder> (only one build per chroot
+	   at a time)
+	d) It reads spool/queue; if there are any requests, it sorts them by
+	   priority (lower number == more important request), then by arrival
+	   time (older == more important), takes them off the queue and
+	   writes the queue back.
+	e) it builds the packages and puts the files into
+	   spool/{buildlogs,ftp}. If the test-build flag is absent, the
+	   packages also go to /spools/ready/ in the chroot (and a poldek
+	   index is generated there)
+
+Building packages:
+	1. fetch the srpm
+	2. install the srpm
+	3. attempt a build (with --nobuild), catch the "foo is needed by ..."
+	   messages and install all such foo. NOTE: this does not always work,
+	   e.g. if rpm bails out on a missing %include file; a separate parser
+	   needs to be written.
+	4. build
+	5. if not a test-build, move the packages to /spools/ready/
+	6. if upgrade, attempt the upgrade, removing all packages that get in
+	   the way, unless that would require removing poldek or rpm-build.
+	7. upgrade
--- /dev/null
+In the client directory there is a script called make-request.sh. Run it
+once without arguments, then look into the ~/.requestrc file. An example
+works best, so below are the settings you have to change:
+
+	requester=mmazur
+	default_key=mmazur@kernel.pl
+
+Where:
+
+	[mmazur@home mmazur]$ gpg --list-secret-keys|grep '@'
+	sec  1024D/A1490DA4 2003-08-14 Mariusz Mazur <mmazur@kernel.pl>
+
+Hopefully it is now clear where that email comes from.
+
+For the time being the settings in force are:
+
+	build_mode=ready
+	f_upgrade=yes
+
+Once the number of packages on the ftp has been brought in line with what
+is in Ra, we will switch to:
+
+	build_mode=test
+	f_upgrade=no
+
+But no need to worry about that for now; when the time comes, I will be
+loud about it.
+
+Now some practical exercises:
+
+	make-request.sh kernel.spec:LINUX_2_6
+	make-request.sh qt.spec kadu.spec
+	make-request.sh -b 'th-i* th-x86_64' nasm.spec
+
+The first example submits a request for the kernel package from the
+LINUX_2_6 branch. The second submits qt and kadu in a single request; if
+the qt build falls over, the automation won't even try to build kadu. The
+last example submits nasm to the x86 builders only (th-i* expands to the
+same thing as th-i?86). Note that when listing builders you have to quote
+them, so they are passed as a single argument.
+
+Everyone gets emails with information about the requests they send (note
+that these mails arrive not at the address in ~/.requestrc, but at the
+address defined in the builder's configs, so I'd suggest picking a
+@pld-linux.org alias, so you can change it yourself without needing the
+intervention of someone with direct access to the relevant builder). If
+you want to be informed about all requests, subscribe to the
+pld-logs-builder@pld-linux.org list and/or follow what is happening at
+http://src.th.pld-linux.org/queue.html
+
+Since for now packages land by default in the ready directory on the ftp,
+and after a build new versions are automatically upgraded on the builder,
+for some time the following invocation will probably come in handy:
+
+	make-request.sh -t nasm.spec
+
+The effect is that the package gets built, but is not automatically
+upgraded on the builders, and lands in test instead of ready (for now
+cieciwa uses this to build 2.6 kernels in peace).
+
+Rules for submitting to Th:
+
+- We always build from HEAD and without bconds. Deviations from this rule
+  are acceptable only in well-justified cases. Building from HEAD makes it
+  easier to keep track of what is on the ftp. The no-bconds rule follows
+  the principle that "a src.rpm should build in the environment available
+  on the ftp (the obvious exception being java), and we shouldn't expect
+  secret knowledge (which bcond to use) from everyone who wants to build a
+  given package".
--- /dev/null
+Installation instructions.
+
+1. Accounts
+
+The builder needs an account of its own. The account should be shared by all
+builders that build binary packages on a given host. This allows for better
+resource management (hint: the job_slots variable in the configuration).
+
+However, the SRPMS builder should use a different account.
+
+We will call the accounts: bin_builder and srpms_builder.
+
+2. Requirements
+
+Both:
+ sending mail
+ gpg
+	scp and/or rsync (sending buildlogs and files to ftp)
+ access to chroots
+
+srpms_builder:
+ mail box + procmail
+	ability to serve files via http
+ cvs connections
+ ftp or http connections (distfiles)
+
+bin_builder:
+ http connections
+
+access to chroots via sudo (sudoers):
+
+srpms_builder ALL = NOPASSWD: \
+ /usr/sbin/chroot /path/to/chroot *
+
+bin_builder ALL = NOPASSWD: \
+ /usr/sbin/chroot /path/to/chroot1 *, \
+ /usr/sbin/chroot /path/to/chroot2 *
+
+3. Scripts installations
+
+Scripts *have* to be in ~/pld-builder.new/, checked out from CVS.
+
+4. Installation of queues
+
+cd ~/pld-builder.new
+./admin/fresh-queue.sh y
+
+5. Chroots setup
+poldek.conf:
+
+source = local,pri=1 /spools/ready/
+source = main-test,noauto,pri=2 ftp://ftp.pld-linux.org/dists/th/test/i686/RPMS/
+source = main-ready,pri=3 ftp://ftp.pld-linux.org/dists/th/ready/i686/RPMS/
+source = main-ug,pri=4 ftp://ftp.pld-linux.org/dists/th/updates/i686/RPMS/
+source = main,pri=5 ftp://ftp.pld-linux.org/dists/th/PLD/i686/RPMS/
+particle_install = no
+greedy = yes
+rpmdef = _excludedocs 1
+cachedir = /spools/poldek/
+keep_downloads = yes
+
+Preinstalled packages:
+
+bin_builder:
+ rpm-build poldek shadow net-tools which
+
+srpms_builder:
+ bin_builder+
+ cvs wget rpm-perlprov rpm-php-pearprov rpm-pythonprov
+
+both: user builder
+
+both: ~builder/rpm/{SPECS,SOURCES,RPMS,SRPMS,BUILD}
+
+srpms_builder: additionally has to have SPECS and SOURCES checked out from CVS;
+in SPECS it has to have builder, additional-md5sum and mirrors
+
+both: folders /spools/poldek/ /spools/ready/
+
+All that stuff can be done automatically with admin/install-chroot.sh.
+
+[[...]]
+
+
+100. cron jobs
+
+srpms_builder:
+ src-builder.sh
+ file-sender.sh
+
+bin_builder:
+ request-fetcher.sh
+ load-balancer.sh
+ file-sender.sh
+
+It is a *very* good idea to run tmpwatch for each builder:
+5 3 * * * sudo chroot $HOME/chroots/chroot-i686/ tmpwatch -m 200 /spools/ready/
+(authentic example). Of course, the example cron item above should be added
+for each chroot on the account. Tmpwatch has to be installed inside the
+chroot; otherwise you have to clean it up manually.
+
+Example for bin builder:
+* * * * * $HOME/pld-builder.new/bin/request-fetcher.sh
+* * * * * $HOME/pld-builder.new/bin/load-balancer.sh
+* * * * * $HOME/pld-builder.new/bin/file-sender.sh
+30 3 * * * $HOME/pld-builder.new/bin/maintainer.sh
+
+Example for source builder:
+* * * * * $HOME/pld-builder.new/bin/src-builder.sh
+* * * * * $HOME/pld-builder.new/bin/file-sender.sh
+30 3 * * * $HOME/pld-builder.new/bin/maintainer.sh
+
+101. procmail
+
+srpms_builder:
+
+:0
+* ^X-New-PLD-Builder:
+|~/pld-builder.new/bin/request-handler.sh
+
+:0
+!admin@somewhere
+
+bin_builder
+
+:0
+!admin@somewhere
+
+102. httpd
+
+The directory ~/pld-builder.new/www/ on the srpm_builder account has to be
+accessible via http (control_url in the bin_builder configuration).
+
+103. src builder
+
+The automation can send notifications between bin builders and src builders
+via e-mail or via http (POST method).
+
+To use HTTP POST, one should run (src_builder side):
+./request_handler_server.py
+On the other side (bin builders):
+notify_url = http://some-server.pl:1234/
+
--- /dev/null
+SHELL=/bin/sh
+PATH=/sbin:/bin:/usr/sbin:/usr/bin
+MAILTO=root
+
+#* * * * * builder exec nice -n 19 /usr/share/pld-builder/bin/request-fetcher.sh
+#* * * * * builder exec nice -n 19 /usr/share/pld-builder/bin/load-balancer.sh
+#* * * * * builder exec nice -n 19 /usr/share/pld-builder/bin/file-sender.sh
+
+#0 0 * * * root chroot /srv/chroot nice -n 19 tmpwatch -m 240 /var/cache/pld-builder/ready
--- /dev/null
+#!/bin/sh
+#
+# pld-builder	perform administrator's tasks for PLD Linux Builder
+#
+# chkconfig: 345 99 01
+#
+# description: perform administrator's tasks for PLD Linux Builder
+#
+# $Id$
+
+# Source function library
+. /etc/rc.d/init.d/functions
+
+# Defaults
+CHROOTS=
+RETVAL=0
+
+# Get service config - may override defaults
+[ -f /etc/sysconfig/pld-builder ] && . /etc/sysconfig/pld-builder
+
+mount_chroots() {
+ # mount /proc in chroots
+ local ret
+ for CHROOT in $CHROOTS; do
+ show "chroot: %s mount /proc" "$CHROOT"
+ mount -t proc -o gid=17 none $CHROOT/proc
+ ret=$?
+ if [ $ret -eq 0 ]; then
+ ok
+ else
+ fail
+ RETVAL=$ret
+ fi
+ done
+}
+
+umount_chroots() {
+ local ret
+ for CHROOT in $CHROOTS; do
+ show "chroot: %s umount /proc" "$CHROOT"
+ umount $CHROOT/proc
+ [ $? -eq 0 ] && ok || fail
+ done
+}
+
+chroots_status() {
+ local ret
+ for CHROOT in $CHROOTS; do
+ show "chroot: %s is /proc mounted?" "$CHROOT"
+ test -f $CHROOT/proc/mounts
+ ret=$?
+ if [ $ret -eq 0 ]; then
+ ok
+ else
+ fail
+ RETVAL=$ret
+ fi
+ done
+}
+
+start() {
+ # Check if the service is already running?
+ if [ -f /var/lock/subsys/pld-builder ]; then
+ msg_already_running "PLD Linux Builder"
+ return
+ fi
+
+ if [ -z "$CHROOTS" ]; then
+ # no chroots configured. return and be silent
+ return
+ fi
+ msg_starting "PLD Linux Builder"
+ busy; echo
+	RETVAL=0
+	mount_chroots	# mount_chroots records any mount failure in RETVAL
+	[ $RETVAL -eq 0 ] && touch /var/lock/subsys/pld-builder
+}
+
+stop() {
+ if [ ! -f /var/lock/subsys/pld-builder ]; then
+ msg_not_running "PLD Linux Builder"
+ return
+ fi
+
+ # Stop daemons.
+ msg_stopping "PLD Linux Builder"
+ busy; echo
+ umount_chroots
+ rm -f /var/lock/subsys/pld-builder >/dev/null 2>&1
+ RETVAL=0
+}
+
+condrestart() {
+ if [ ! -f /var/lock/subsys/pld-builder ]; then
+ msg_not_running "PLD Linux Builder"
+ RETVAL=$1
+ return
+ fi
+
+ stop
+ start
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ try-restart)
+ condrestart 0
+ ;;
+ force-reload)
+ condrestart 7
+ ;;
+ status)
+	RETVAL=0
+	chroots_status	# chroots_status records any failure in RETVAL
+ ;;
+ *)
+ msg_usage "$0 {start|stop|restart|try-restart|force-reload|status}"
+ exit 3
+ ;;
+esac
+exit $RETVAL
--- /dev/null
+# define chroots whose /proc to mount on startup
+# (a space-separated list of paths; note that brace expansion is not
+# performed in sh variable assignments, so list each chroot explicitly)
+
+#CHROOTS="/srv/builders/pld/ac/athlon /srv/builders/pld/ac/i586 /srv/builders/pld/actw/athlon /srv/builders/pld/actw/i586 /srv/builders/pld/th/athlon /srv/builders/pld/th/i486"
--- /dev/null
+# locally cached rpms
+[source]
+name = ready
+pri = -1
+type = dir
+path = /var/cache/pld-builder/ready/
+autoup = no
--- /dev/null
+LOGFILE=procmail.log
+
+#:0 c
+#mail.copy
+
+:0
+* ^X-New-PLD-Builder:
+| /usr/share/pld-builder/bin/request-handler.sh
+
+:0
+* ^FROM_MAILER
+/dev/null
+
+#:0
+#!root@example.org
--- /dev/null
+# rpm macros for pld builder chroot
+
+# A colon separated list of desired locales to be installed;
+# "all" means install all locale specific files.
+%_install_langs en_US
+
+# If non-zero, all erasures will be automagically repackaged.
+%_repackage_all_erasures 0
+
+# Boolean (i.e. 1 == "yes", 0 == "no") that controls whether files
+# marked as %doc should be installed.
+# FIXME: excludedocs breaks kde build
+#%_excludedocs 1
--- /dev/null
+# allow running all commands under the builder chroot
+#builder ALL=(ALL) NOPASSWD: /usr/sbin/chroot /srv/chroot *
--- /dev/null
+<!ELEMENT queue (group*)>
+
+<!ELEMENT group (requester,time,priority,batch+)>
+<!ATTLIST group id CDATA #REQUIRED>
+<!ATTLIST group no CDATA #REQUIRED>
+<!ATTLIST group flags CDATA "">
+
+<!ELEMENT requester (#PCDATA)>
+<!ATTLIST requester email CDATA #REQUIRED>
+
+<!ELEMENT time (#PCDATA)>
+
+<!ELEMENT priority (#PCDATA)>
+
+<!ELEMENT batch (src-rpm,command,spec,branch,info,kernel?,(with|without)*,builder+)>
+<!ATTLIST batch id CDATA #REQUIRED>
+<!ATTLIST batch depends-on CDATA "">
+
+<!ELEMENT src-rpm (#PCDATA)>
+
+<!ELEMENT command (#PCDATA)>
+<!ATTLIST command flags CDATA "">
+
+<!ELEMENT spec (#PCDATA)>
+
+<!ELEMENT branch (#PCDATA)>
+
+<!ELEMENT info (#PCDATA)>
+
+<!ELEMENT kernel (#PCDATA)>
+
+<!ELEMENT with (#PCDATA)>
+
+<!ELEMENT without (#PCDATA)>
+
+<!ELEMENT builder (#PCDATA)>
+<!ATTLIST builder status CDATA #REQUIRED>
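+
+<!-- A minimal document matching this DTD (hypothetical values, for
+     illustration only):
+
+<queue>
+ <group id="deadbeef" no="1" flags="">
+  <requester email="user@pld-linux.org">user</requester>
+  <time>1134000000</time>
+  <priority>2</priority>
+  <batch id="0f1e2d" depends-on="">
+   <src-rpm>foo-1.0-1.src.rpm</src-rpm>
+   <command flags=""></command>
+   <spec>foo.spec</spec>
+   <branch>HEAD</branch>
+   <info></info>
+   <builder status="OK">src</builder>
+  </batch>
+ </group>
+</queue>
+-->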
+
+
--- /dev/null
+<?xml version="1.0"?>
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
+targetNamespace="http://pld-linux.org"
+xmlns="http://pld-linux.org"
+elementFormDefault="qualified">
+
+<xs:element name="queue">
+<xs:complexType>
+<xs:sequence>
+ <xs:element name="group" minOccurs="0" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:sequence>
+ <xs:element name="requester">
+ <xs:complexType mixed="true">
+ <xs:attribute name="email" type="xs:string"/>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="time" type="xs:integer"/>
+ <xs:element name="priority" type="xs:integer"/>
+ <xs:element name="batch" maxOccurs="unbounded">
+ <xs:complexType>
+ <xs:sequence>
+ <xs:element name="src-rpm" type="xs:string"/>
+ <xs:element name="command">
+ <xs:complexType mixed="true">
+ <xs:attribute name="flags" type="xs:string" default=""/>
+ </xs:complexType>
+ </xs:element>
+ <xs:element name="spec" type="xs:string"/>
+ <xs:element name="branch" type="xs:string"/>
+ <xs:element name="info" type="xs:string" default=""/>
+ <xs:element name="kernel" type="xs:string" minOccurs="0"/>
+ <xs:element name="with" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="without" type="xs:string" minOccurs="0" maxOccurs="unbounded"/>
+ <xs:element name="builder" maxOccurs="unbounded">
+ <xs:complexType mixed="true">
+ <xs:attribute name="status" type="xs:string" default=""/>
+ </xs:complexType>
+ </xs:element>
+ </xs:sequence>
+ <xs:attribute name="id" type="xs:string"/>
+ <xs:attribute name="depends-on" type="xs:string" default=""/>
+ </xs:complexType>
+ </xs:element>
+ </xs:sequence>
+ <xs:attribute name="id" type="xs:string"/>
+ <xs:attribute name="no" type="xs:integer"/>
+ <xs:attribute name="flags" type="xs:string" default=""/>
+ </xs:complexType>
+ </xs:element>
+</xs:sequence>
+</xs:complexType>
+</xs:element>
+
+</xs:schema>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+
+<!-- TODO: 1) links
+ 2) proper dates with http://www.djkaty.com/drupal/xsl-date-time -->
+
+<xsl:template match="/queue">
+<html><head><title>PLD builder queue</title></head><body>
+<xsl:for-each select="group">
+ <xsl:sort select="@no" order="descending"/>
+ <p><b><xsl:value-of select="@no"/></b>. <xsl:value-of select="time"/> from <xsl:value-of select="requester"/><xsl:text> </xsl:text><small><xsl:value-of select="@id"/>, <xsl:value-of select="priority"/>, <xsl:value-of select="@flags"/></small><br/>
+ <ul>
+ <xsl:for-each select="batch">
+ <li>
+ <xsl:value-of select="src-rpm"/>
+ (<xsl:value-of select="spec"/> -R <xsl:value-of select="branch"/>
+ <xsl:for-each select="with | without">
+ <xsl:if test="name() = 'with'">
+ <xsl:text> --with </xsl:text>
+ </xsl:if>
+ <xsl:if test="name() = 'without'">
+ <xsl:text> --without </xsl:text>
+ </xsl:if>
+ <xsl:value-of select="."/>
+ <xsl:if test="position() != last()">
+ <xsl:text> </xsl:text>
+ </xsl:if>
+ </xsl:for-each>
+ <xsl:if test="kernel">
+ <xsl:text> --define 'alt_kernel </xsl:text>
+ <xsl:value-of select="kernel"/>'
+ </xsl:if>)
+ <small>
+ [<xsl:for-each select="builder"><b>
+ <xsl:choose>
+ <xsl:when test="@status = 'OK'">
+ <font color='green'><xsl:value-of select="."/>:<xsl:value-of select="@status"/></font>
+ </xsl:when>
+ <xsl:when test="@status = 'FAIL'">
+ <font color='red'><xsl:value-of select="."/>:<xsl:value-of select="@status"/></font>
+ </xsl:when>
+ <xsl:otherwise>
+ <font color='black'><xsl:value-of select="."/>:<xsl:value-of select="@status"/></font>
+ </xsl:otherwise>
+ </xsl:choose>
+ </b>
+ <xsl:if test="position()!=last()"><xsl:text> </xsl:text></xsl:if>
+ </xsl:for-each>]
+ </small>
+ </li>
+ </xsl:for-each>
+ </ul>
+</p>
+</xsl:for-each>
+</body></html>
+</xsl:template>
+
+</xsl:stylesheet>
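+
+<!-- Usage sketch (hypothetical file names):
+     xsltproc queue.xsl queue.xml > queue.html
+-->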
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
+
+<!-- TODO: 1) links
+ 2) proper dates with http://www.djkaty.com/drupal/xsl-date-time -->
+
+<xsl:template match="/queue">
+
+<fo:root xmlns:fo="http://www.w3.org/1999/XSL/Format" font-size="10pt">
+
+<fo:layout-master-set>
+ <fo:simple-page-master master-name="A4" page-width="210mm" page-height="297mm" margin="1cm">
+ <fo:region-body margin="0cm"/>
+ <fo:region-before extent="0cm"/>
+ <fo:region-after extent="0cm"/>
+ <fo:region-start extent="0cm"/>
+ <fo:region-end extent="0cm"/>
+ </fo:simple-page-master>
+</fo:layout-master-set>
+<fo:page-sequence master-reference="A4">
+<fo:flow flow-name="xsl-region-body">
+
+
+<xsl:for-each select="group">
+ <xsl:sort select="@no" order="descending"/>
+
+ <fo:block space-before="2mm" space-after="2mm"><fo:inline font-weight="bold"><xsl:value-of select="@no"/></fo:inline>. <xsl:value-of select="time"/> from <xsl:value-of select="requester"/><xsl:text> </xsl:text><fo:inline font-size="small"><xsl:value-of select="@id"/>, <xsl:value-of select="priority"/>, <xsl:value-of select="@flags"/></fo:inline></fo:block>
+ <fo:list-block space-before="2mm" space-after="2mm">
+ <xsl:for-each select="batch">
+ <fo:list-item space-before="2mm" space-after="2mm">
+ <fo:list-item-label start-indent="5mm">
+ <fo:block font-family="monospace">*</fo:block>
+ </fo:list-item-label>
+ <fo:list-item-body start-indent="9mm">
+ <fo:block>
+ <xsl:value-of select="src-rpm"/>
+ (<xsl:value-of select="spec"/> -R <xsl:value-of select="branch"/>
+ <xsl:for-each select="with | without">
+ <xsl:if test="name() = 'with'">
+ <xsl:text> --with </xsl:text>
+ </xsl:if>
+ <xsl:if test="name() = 'without'">
+ <xsl:text> --without </xsl:text>
+ </xsl:if>
+ <xsl:value-of select="."/>
+ <xsl:if test="position() != last()">
+ <xsl:text> </xsl:text>
+ </xsl:if>
+ </xsl:for-each>
+ <xsl:if test="kernel">
+ <xsl:text> --define 'alt_kernel </xsl:text>
+ <xsl:value-of select="kernel"/>'
+ </xsl:if>)
+ <fo:inline font-size="small">
+ [<xsl:for-each select="builder">
+ <xsl:choose>
+ <xsl:when test="@status = 'OK'">
+ <fo:inline color="green"><xsl:value-of select="."/>:<xsl:value-of select="@status"/></fo:inline>
+ </xsl:when>
+ <xsl:when test="@status = 'FAIL'">
+ <fo:inline color="red"><xsl:value-of select="."/>:<xsl:value-of select="@status"/></fo:inline>
+ </xsl:when>
+ <xsl:otherwise>
+ <fo:inline color="black"><xsl:value-of select="."/>:<xsl:value-of select="@status"/></fo:inline>
+ </xsl:otherwise>
+ </xsl:choose>
+ <xsl:if test="position()!=last()"><xsl:text> </xsl:text></xsl:if>
+ </xsl:for-each>]
+ </fo:inline>
+ </fo:block>
+ </fo:list-item-body>
+ </fo:list-item>
+ </xsl:for-each>
+ </fo:list-block>
+
+</xsl:for-each>
+
+</fo:flow>
+</fo:page-sequence>
+
+</fo:root>
+
+</xsl:template>
+
+</xsl:stylesheet>
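+
+<!-- Usage sketch (hypothetical file names; needs an XSL-FO processor
+     such as fop):
+     xsltproc queue-fo.xsl queue.xml > queue.fo
+     fop -fo queue.fo -pdf queue.pdf
+-->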
+