1
0
mirror of https://git.FreeBSD.org/ports.git synced 2025-01-11 07:22:22 +00:00

Remove portbuild scripts from pcvs, as they now live in svn/projects.

This commit is contained in:
Florent Thoumie 2011-04-24 16:37:51 +00:00
parent 5c392de08e
commit 03074365a5
Notes: svn2git 2021-03-31 03:12:20 +00:00
svn path=/head/; revision=273139
72 changed files with 0 additions and 9079 deletions

View File

@ -1,3 +0,0 @@
.cvsignore
README.forwarded_ports
*/dotunnel.*

View File

@ -1,32 +0,0 @@
Various package build nodes require us to set up TCP tunnels to talk
to them. (Some systems don't pass certain ports; some systems have
firewalls; some systems have multiple nodes on one IP address.)
These have always been hardcoded in crontab lines of the form "while true;
do <hardcoded-tunnel-stuff>; done". Other than the magic hardcoding,
there's a problem with this. When the tunnel command exits, such as if
the host suddenly becomes unreachable, it doesn't send mail -- instead
it just accumulates a huge file in /var/spool/clientmqueue which never
gets sent. To add insult to injury, /var is on the root partition on
pointyhat.
To cure these problems, we now have
/var/portbuild/conf/<arch>/dotunnel.XXX
where XXX corresponds to one line in the old crontab. Each script sets
up one tunnel, sends mail to the user(s) in
/var/portbuild/<arch>/portbuild.conf
once the command exits, and then sleeps.
Why not put it in /var/portbuild/<arch> you ask? That directory is
propagated to all nodes for that arch. This would be a security leak.
The intention is that none of the dotunnel files will be checked into
CVS.
Final note: each script figures out which arch it is for by fiddling
with its $0, so invoke it with its full pathname.
mcl

View File

@ -1,41 +0,0 @@
#
# client-side definitions (used in /var/portbuild/scripts/portbuild)
#
# $FreeBSD$
#
#
# items to be customized per each package build master
#
# for nodes where disconnected=0, the NFS host they should mount ports/
# and src/ from
CLIENT_NFS_MASTER=pointyhat.FreeBSD.org
# fallback for distfiles (see make.conf in bindist-*.tar)
CLIENT_BACKUP_FTP_SITE=pointyhat.FreeBSD.org
# where completed packages get uploaded
CLIENT_UPLOAD_HOST=pointyhat.FreeBSD.org
#
# items that will most likely be common to all package build masters
#
CLIENT_DISTDIR=/tmp/distfiles
# XXX renaming this from PACKAGES
CLIENT_PACKAGES_LOCATION=/tmp/packages
CLIENT_SRCBASE=/usr/src
CLIENT_WRKDIRPREFIX=/work
# wait 2 hours before killing build with no output
CLIENT_BUILD_TIMEOUT=7200
CLIENT_FTP_TIMEOUT=900
CLIENT_HTTP_TIMEOUT=900
# to prevent runaway processes -- 400 meg file size limit, 2 hours CPU limit
CLIENT_ULIMIT_F=819200
CLIENT_ULIMIT_T=7200
# debugging definitions
CLIENT_MALLOC_OPTIONS=AJ

View File

@ -1,21 +0,0 @@
#
# package building configuration file containing things that are common
# both to the server-side (pointyhat instance) and the client side
# (individual build clients).
#
# original author: linimon
#
# $FreeBSD$
#
#
# top-level package building things. These will probably be common
# to all package build masters.
#
LOCALBASE=/usr/local
PKGSUFFIX=.tbz
ARCHS_REQUIRING_AOUT_COMPAT="i386"
ARCHS_REQUIRING_LINPROCFS="amd64 i386"
ARCHS_SUPPORTING_COMPAT_IA32="amd64 i386 ia64"

View File

@ -1,16 +0,0 @@
# DON'T SET PORT VARIABLES UNCONDITIONALLY - THEY NEED TO BE
# OVERRIDABLE BY THE SCRIPTS
USA_RESIDENT?=YES
#
MASTER_SITE_BACKUP= \
ftp://ftp-master.freebsd.org/pub/FreeBSD/ports/distfiles/${DIST_SUBDIR}/
MASTER_SITE_OVERRIDE?= ${MASTER_SITE_BACKUP}
MASTER_SITE_LOCAL= \
ftp://ftp-master.FreeBSD.org/pub/FreeBSD/ports/local-distfiles/%SUBDIR%/
NO_PROFILE=true
MAKE_KERBEROS5= yes
SENDMAIL_CF= freefall.cf
BOOT_PXELDR_PROBE_KEYBOARD= true
ENABLE_SUID_K5SU=yes

View File

@ -1,105 +0,0 @@
#
# package building configuration file (server-side). Specific to each
# pointyhat instance.
#
# note: readable by both Python and /bin/sh files. HOWEVER, there is no
# code yet to do the {}-style shell expansions in the Python scripts.
# Beware!
#
# original author: linimon
#
# $FreeBSD$
#
#
# top-level package building things
#
SUPPORTED_ARCHS="amd64 i386 ia64 powerpc sparc64"
SRC_BRANCHES="7 8 9"
SRC_BRANCHES_PATTERN="^[0-9]*"
SRC_BRANCH_7_TAG=RELENG_7_3
SRC_BRANCH_8_TAG=RELENG_8_1
SRC_BRANCH_9_TAG=.
DEFAULT_LINUX_OSRELEASE="2.6.16"
#
# directory management definitions
#
ZFS_VOLUME=a
ZFS_MOUNTPOINT=/a
SNAP_DIRECTORY=snap
SNAP_PORTS_DIRECTORY=${SNAP_DIRECTORY}/ports-head
SNAP_SRC_DIRECTORY_PREFIX=${SNAP_DIRECTORY}/src-
SUPFILE_DIRECTORY=/home/portmgr/sup
PORTS_MASTER_SUPFILE=${SUPFILE_DIRECTORY}/ports-master-supfile
SRC_MASTER_SUPFILE=${SUPFILE_DIRECTORY}/src-master-supfile
PORTS_SUPFILE=${SUPFILE_DIRECTORY}/ports-supfile
SRC_SUPFILE=${SUPFILE_DIRECTORY}/src-supfile
WORLDDIR=${ZFS_MOUNTPOINT}/chroot/
# XXX TODO (note: Python script, so avoid {})
#zbackup a/nfs a/local a/portbuild/* /dumpster
#zexpire a/nfs a/local a/portbuild/* a/snap/*
#
# buildproxy definitions (note: Python script, so avoid {})
#
BUILDPROXY_SOCKET_FILE=/tmp/.build
#
# pdispatch definitions
#
# reflect hardwiring in 'buildscript' phase 1 and also 'processonelog' and
# 'processlogs2'. You probably do not want to change this!
PDISPATCH_HDRLENGTH=6
# number of lines of log to email
PDISPATCH_LOGLENGTH=1000
# wait 100 hours maximum
PDISPATCH_TIMEOUT=360000
#
# qmanager definitions (note: Python script, so avoid {})
#
QMANAGER_PATH=/var/portbuild/evil/qmanager
QMANAGER_DATABASE_FILE=qdb.sl3
QMANAGER_SOCKET_FILE=/tmp/.qmgr
QMANAGER_PRIORITY_PACKAGES="openoffice kde-3"
# maximum number of times to build an individual job
QMANAGER_MAX_JOB_ATTEMPTS=5
# attempt to limit the amount time (and email) on botched runs
QMANAGER_RUNAWAY_PERCENTAGE=0.75
QMANAGER_RUNAWAY_THRESHOLD=100
#
# upload definitions (see 'cpdistfiles')
#
UPLOAD_DIRECTORY="w/ports/distfiles/"
UPLOAD_TARGET="ftp-master.FreeBSD.org"
UPLOAD_USER="portmgr"
#
# user-visible things
#
MASTER_URL="pointyhat.FreeBSD.org"
#
# www definitions (see processfail)
#
WWW_DIRECTORY=/usr/local/www/data/

View File

@ -1,984 +0,0 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2//EN">
<html>
<head>
<meta name="generator" content="HTML Tidy, see www.w3.org">
<title>FreeBSD Package building logs and errors</title>
</head>
<body>
<h1><font color="#990000">FreeBSD Package building logs and errors</font></h1>
<p>FreeBSD maintains a build farm (the "pointyhat cluster") that is used
to build all packages across all supported architectures and major releases.
This page contains the build logs and errors for all the ports built by
the cluster. See the <a href="#notes">notes</a> for additional information,
or the <a href="#errorlist">types of package errors detected</a>.</p>
<h4><font color="#990000">
Port cross-reference summaries
</font></h4>
<p>These live on
<a href="http://portsmon.FreeBSD.org/index.html">portsmon.FreeBSD.org</a>.</p>
<dl>
<dd><img alt="*" src="/errorlogs/images/blue-ball.gif"> Build errors
<a href="http://portsmon.FreeBSD.org/portserrs.py">by portname</a>;
<a href="http://portsmon.FreeBSD.org/portscrossref.py">by portname vs. build environment</a>;
<a href="http://portsmon.FreeBSD.org/portserrcounts.py">by error type vs. build environment</a></dd>
<dd><img alt="*" src="/errorlogs/images/blue-ball.gif"> Problem Reports
for <a href="http://portsmon.FreeBSD.org/portsprsbyexplanation.py?explanation=existing">existing ports</a>;
for <a href="http://portsmon.FreeBSD.org/portsprsbyexplanation.py?explanation=new">new ports</a>;
for <a href="http://portsmon.FreeBSD.org/portsprsbyexplanation.py?explanation=framework">the ports framework</a>;
for <a href="http://portsmon.FreeBSD.org/portsprsbyexplanation.py?explanation=repocopy">repocopies requested</a>;
for <a href="http://portsmon.FreeBSD.org/portsprsbyexplanation.py?explanation=unknown">unknown</a></dd>
<dd><img alt="*" src="/errorlogs/images/blue-ball.gif"> Build Errors and
Problem Reports
<a href="http://portsmon.FreeBSD.org/portsconcordance.py">by portname</a>;
<a href="http://portsmon.FreeBSD.org/portsconcordanceformaintainer.py">for one maintainer</a>;
<a href="http://portsmon.FreeBSD.org/portsconcordanceforbroken.py">for broken ports</a>;
<a href="http://portsmon.FreeBSD.org/portsconcordancefordeprecated.py">for deprecated ports</a>;
<a href="http://portsmon.FreeBSD.org/portsconcordanceforforbidden.py">for forbidden ports</a></dd>
<dd><img alt="*" src="/errorlogs/images/blue-ball.gif"> Everything about
<a href="http://portsmon.FreeBSD.org/portoverview.py">one port</a></dd>
<dd><img alt="*" src="/errorlogs/images/blue-ball.gif"> The
<a href="http://portsmon.FreeBSD.org/index.html">complete list of all reports</a></dd>
</dl>
<h4><font color="#990000">
New build failures
</font></h4>
<p>Check here to find the most recent error log from your port.</p>
<!--#config timefmt="%F %R" -->
<dl>
<dd>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> New build
failures on 7.x-stable:
<a href="amd64-7-failure.html">amd64</a>
<a href="i386-7-failure.html">i386</a>
<a href="ia64-7-failure.html">ia64</a>
<a href="sparc64-7-failure.html">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> New build
failures on 8.x-stable:
<a href="amd64-8-failure.html">amd64</a>
<a href="i386-8-failure.html">i386</a>
<a href="ia64-8-failure.html">ia64</a>
<a href="powerpc-8-failure.html">powerpc</a>
<a href="sparc64-8-failure.html">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> New build
failures on 9.x-current:
<a href="amd64-9-failure.html">amd64</a>
<a href="i386-9-failure.html">i386</a>
<a href="ia64-9-failure.html">ia64</a>
<a href="powerpc-9-failure.html">powerpc</a>
<a href="sparc64-9-failure.html">sparc64</a>
<br>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> New build failures on 7.x-stable with experimental port patches:
<a href="amd64-7-exp-failure.html">amd64</a>
<a href="i386-7-exp-failure.html">i386</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> New build failures on 8.x-stable with experimental port patches:
<a href="amd64-8-exp-failure.html">amd64</a>
<a href="i386-8-exp-failure.html">i386</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> New build failures on 9.x-current with experimental port patches:
<a href="amd64-9-exp-failure.html">amd64</a>
<a href="i386-9-exp-failure.html">i386</a>
<br>
</dd>
</dl>
<h4><font color="#990000">
Error logs
</font></h4>
<dl>
<dd>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Previous run
on 7.x-stable:
<a href="amd64-7-full/">amd64</a>
<a href="i386-7-full/">i386</a>
<a href="ia64-7-full/">ia64</a>
<a href="sparc64-7-full/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Current run
on 7.x-stable:
<a href="amd64-7-latest/">amd64</a>
<a href="i386-7-latest/">i386</a>
<a href="ia64-7-latest/">ia64</a>
<a href="sparc64-7-latest/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Previous run
on 8.x-stable:
<a href="amd64-8-full/">amd64</a>
<a href="i386-8-full/">i386</a>
<a href="ia64-8-full/">ia64</a>
<a href="powerpc-8-full/">powerpc</a>
<a href="sparc64-8-full/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Current run
on 8.x-stable:
<a href="amd64-8-latest/">amd64</a>
<a href="i386-8-latest/">i386</a>
<a href="ia64-8-latest/">ia64</a>
<a href="powerpc-8-latest/">powerpc</a>
<a href="sparc64-8-latest/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Previous run
on 9.x-current:
<a href="amd64-9-full/">amd64</a>
<a href="i386-9-full/">i386</a>
<a href="ia64-9-full/">ia64</a>
<a href="powerpc-9-full/">powerpc</a>
<a href="sparc64-9-full/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Current run
on 9.x-current:
<a href="amd64-9-latest/">amd64</a>
<a href="i386-9-latest/">i386</a>
<a href="ia64-9-latest/">ia64</a>
<a href="powerpc-9-latest/">powerpc</a>
<a href="sparc64-9-latest/">sparc64</a>
<br>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Previous run
on 7.x-stable with experimental port patches:
<a href="amd64-7-exp-full/">amd64</a>
<a href="i386-7-exp-full/">i386</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Current run
on 7.x-stable with experimental port patches:
<a href="amd64-7-exp-latest/">amd64</a>
<a href="i386-7-exp-latest/">i386</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Previous run
on 8.x-stable with experimental port patches:
<a href="amd64-8-exp-full/">amd64</a>
<a href="i386-8-exp-full/">i386</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Current run
on 8.x-stable with experimental port patches:
<a href="amd64-8-exp-latest/">amd64</a>
<a href="i386-8-exp-latest/">i386</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Previous run
on 9.x-current with experimental port patches:
<a href="amd64-9-exp-full/">amd64</a>
<a href="i386-9-exp-full/">i386</a>
<br>
<img alt="*" src="/errorlogs/images/yellow-ball.gif"> Current run
on 9.x-current with experimental port patches:
<a href="amd64-9-exp-latest/">amd64</a>
<a href="i386-9-exp-latest/">i386</a>
</dd>
</dl>
<h4><font color="#990000">
Build logs (errors and otherwise)
</font></h4>
<dl>
<dd>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Previous run
on 7.x-stable:
<a href="amd64-7-full-logs">amd64</a>
<a href="i386-7-full-logs">i386</a>
<a href="ia64-7-full-logs">ia64</a>
<a href="sparc64-7-full-logs">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Current run on
7.x-stable:
<a href="amd64-7-latest-logs">amd64</a>
<a href="i386-7-latest-logs">i386</a>
<a href="ia64-7-latest-logs">ia64</a>
<a href="sparc64-7-latest-logs">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Previous run
on 8.x-stable:
<a href="amd64-8-full-logs">amd64</a>
<a href="i386-8-full-logs">i386</a>
<a href="ia64-8-full-logs">ia64</a>
<a href="powerpc-8-full-logs">powerpc</a>
<a href="sparc64-8-full-logs">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Current run on
8.x-stable:
<a href="amd64-8-latest-logs">amd64</a>
<a href="i386-8-latest-logs">i386</a>
<a href="ia64-8-latest-logs">ia64</a>
<a href="powerpc-8-latest-logs">powerpc</a>
<a href="sparc64-8-latest-logs">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Previous run
on 9.x-current:
<a href="amd64-9-full-logs">amd64</a>
<a href="i386-9-full-logs">i386</a>
<a href="ia64-9-full-logs">ia64</a>
<a href="powerpc-9-full-logs">powerpc</a>
<a href="sparc64-9-full-logs">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Current run on
9.x-current:
<a href="amd64-9-latest-logs">amd64</a>
<a href="i386-9-latest-logs">i386</a>
<a href="ia64-9-latest-logs">ia64</a>
<a href="powerpc-9-latest-logs">powerpc</a>
<a href="sparc64-9-latest-logs">sparc64</a>
<br>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Previous run
on 7.x-stable with experimental port patches:
<a href="amd64-7-exp-full-logs">amd64</a>
<a href="i386-7-exp-full-logs">i386</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Current run on
7.x-stable with experimental port patches:
<a href="amd64-7-exp-latest-logs">amd64</a>
<a href="i386-7-exp-latest-logs">i386</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Previous run
on 8.x-stable with experimental port patches:
<a href="amd64-8-exp-full-logs">amd64</a>
<a href="i386-8-exp-full-logs">i386</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Current run on
8.x-stable with experimental port patches:
<a href="amd64-8-exp-latest-logs">amd64</a>
<a href="i386-8-exp-latest-logs">i386</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Previous run
on 9.x-current with experimental port patches:
<a href="amd64-9-exp-full-logs">amd64</a>
<a href="i386-9-exp-full-logs">i386</a>
<br>
<img alt="*" src="/errorlogs/images/orange-ball.gif">Current run on
9.x-current with experimental port patches:
<a href="amd64-9-exp-latest-logs">amd64</a>
<a href="i386-9-exp-latest-logs">i386</a>
<br>
</dd>
</dl>
<h4><font color="#990000">
Packages
</font></h4>
<dl>
<dd>
<img alt="*" src="/errorlogs/images/purple-ball.gif"> Packages from
latest run on 6-stable:
<a href="amd64-6-packages-latest/">amd64</a>
<a href="i386-6-packages-latest/">i386</a>
<a href="sparc64-6-packages-latest/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/purple-ball.gif"> Packages from
latest run on 7-stable:
<a href="amd64-7-packages-latest/">amd64</a>
<a href="i386-7-packages-latest/">i386</a>
<a href="ia64-7-packages-latest/">ia64</a>
<a href="sparc64-7-packages-latest/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/purple-ball.gif"> Packages from
latest run on 8-stable:
<a href="amd64-8-packages-latest/">amd64</a>
<a href="i386-8-packages-latest/">i386</a>
<a href="ia64-8-packages-latest/">ia64</a>
<a href="powerpc-8-packages-latest/">powerpc</a>
<a href="sparc64-8-packages-latest/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/purple-ball.gif"> Packages from
latest run on 9-current:
<a href="amd64-9-packages-latest/">amd64</a>
<a href="i386-9-packages-latest/">i386</a>
<a href="ia64-9-packages-latest/">ia64</a>
<a href="powerpc-9-packages-latest/">powerpc</a>
<a href="sparc64-9-packages-latest/">sparc64</a>
<br>
<br>
<img alt="*" src="/errorlogs/images/purple-ball.gif"> Package
<a href="packagestats.html">building statistics</a> (current state of all package builds)
</dd>
</dl>
<h4><font color="#990000">
Archive
</font></h4>
<dl>
<dd><img alt="*" src="/errorlogs/images/red-ball.gif"> All error
logs:
<a href="amd64-errorlogs/">amd64</a>
<a href="i386-errorlogs/">i386</a>
<a href="ia64-errorlogs/">ia64</a>
<a href="powerpc-errorlogs/">powerpc</a>
<a href="sparc64-errorlogs/">sparc64</a>
<br>
<img alt="*" src="/errorlogs/images/red-ball.gif"> All portbuild
logs:
<a href="amd64-buildlogs/">amd64</a>
<a href="i386-buildlogs/">i386</a>
<a href="ia64-buildlogs/">ia64</a>
<a href="powerpc-buildlogs/">powerpc</a>
<a href="sparc64-buildlogs/">sparc64</a>
</dd>
</dl>
<a name="notes">
<h3><font color="#990000">Additional information</font></h3>
</a>
<p>All of the "Current run" links are possibly in progress and may be
partial, so keep that in mind if there appear to be some missing.</p>
<p>None of the ports marked <tt>IS_INTERACTIVE</tt> or
<tt>NO_PACKAGE</tt> are built any more -- if you have ports that
fall into those categories, assume their packages or distfiles will
never show up in ftp sites or CDROMs. <tt>RESTRICTED</tt> packages
are built and deleted (using "<tt>make
clean-restricted-list</tt>"). <tt>NO_CDROM</tt> packages are built
but deleted (using "<tt>make clean-cdrom-list</tt>") before being
put on a CDROM.</p>
<p>See also the <a href="errors.html">types of errors detected</a>.</p>
<p>Notes on the building process:</p>
<ul>
<li><em>Every</em> port is built in its own chroot environment
<!-- MCL 20090823 broken links
(
<a href="i386-7-bindist.tar">tarball for 7.x-stable</a>
|<a href="i386-8-bindist.tar">tarball for 8.x-stable</a>
|<a href="i386-9-bindist.tar">tarball for 9.x-current</a>
)
-->
, starting with an
empty <tt>/usr/local</tt> and <tt>/usr/X11R6</tt>. <a name=
"pkgadd">The dependencies are installed as packages</a> just before
the build. You can see the list of dependencies on the third line
of the log -- the "foo.tgz bar.tgz" stuff are the dependencies. To
make sure that these actually work, <tt>DEPENDS_TARGET</tt> is set
to "/usr/bin/true"; if you see "/usr/bin/true is up to date" or
some such, that means there is something wrong with the dependency
lines or the packages this port is depending on.</li>
<li>The build is done on a shared (read-only) <tt>/usr/ports</tt>
with <a href=
"http://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/porting-wrkdir.html">
<tt>WRKDIRPREFIX</tt></a> set to <tt>/work</tt>. If your
<tt>WRKSRC</tt> looks funny, that's probably why.</li>
<li><tt>MASTER_SITE_OVERRIDE</tt> is pointing to pointyhat's distfile
dir, and <tt>MASTER_SITE_BACKUP</tt> is pointing to
ftp-master.freebsd.org or a local mirror. <!--Wrong!
The one on pointyhat is empty when the build starts,
and every successful build will copy distfiles to there. This means
that if there are a few ports that share the same distfile, only
the first one will have to go to the original master site (provided
the second build starts after the first one ends). After the entire
build process, the distfiles are copied over to ftp.freebsd.org.
This means if your port built successfully once, you will never see
a "can't fetch distfile" error again.-->
Please pay attention to
<a href="http://people.freebsd.org/~ehaupt/distilator/">
Emanuel Haupt's distfile checker</a>
for that.</li>
<li>The ports are built on machines that are mostly running
9-CURRENT, with some 8.x-STABLE machines. If your port depends on
the result of uname(3) or sysctl to determine the running version of
FreeBSD, change it to use uname(1) instead (the builds use a dummy
uname(1) that reports the target version of FreeBSD), or change it to
use the value of the OSVERSION variable that can be passed in from
the port makefile. </li> </ul>
<a name="errorlist"></a>
<h2><font color="#990000">Types of package errors detected</font></h2>
<p>Here is the <b>alphabetical list of current errors</b>
detected by the AI script. Note that this is all just a rough guess --
it is merely for your aid.</p>
<p>Key:
<dl>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif">The most common
errors.</dt>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif">Some less common
errors.</dt>
<dt><img alt="(transient)" src="/errorlogs/images/green-ball.gif">Transient
errors. These may not be your fault.</dt>
</dl>
</p>
<p>
<dl>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"arch">arch</a></dt>
<dd>The port does not build on a particular architecture, due to
assembler or linker errors. In some easy cases this is due to
not picking up the various <tt>ARCH</tt> configuration variables
in the Makefile; you'll see this via, e.g., a Sparc <tt>make</tt>
failing while looking for an i386 subdirectory. For the 64-bit
architectures, a common problem is the assumption many programmers
make that pointers may be cast to and from 32-bit ints. In other cases
the problems run much deeper, in which case <tt>ONLY_FOR_ARCHS</tt>
may be needed.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"autoconf">autoconf</a></dt>
<dd>Your port depends on <tt>autoconf</tt>, but the <tt>Makefile</tt>
either doesn't have <tt>USE_AUTOCONF</tt>, or does not use
<tt>USE_AUTOCONF_VER</tt> correctly.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"autoheader">autoheader</a></dt>
<dd>Your port depends on <tt>autoheader</tt>, but the <tt>Makefile</tt>
cannot find it; set <tt>USE_AUTOHEADER</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"automake">automake</a></dt>
<dd>Your port depends on <tt>automake</tt>, but the <tt>Makefile</tt>
either doesn't have <tt>USE_AUTOMAKE</tt>, or does not use
<tt>USE_AUTOMAKE_VER</tt> correctly.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"badc++">bad C++ code</a></dt>
<dd>There is a compiler error which is caused by something specific
to C++.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"cc">compiler error</a></dt>
<dd>There is a C compiler error which is caused by something other
than e.g. "new compiler error".</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"categories">CATEGORIES</a></dt>
<dd>The <tt>CATEGORIES</tt> line in <tt>Makefile</tt> includes an
invalid category.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"checksum">checksum</a></dt>
<dd>The checksum of one or more of the files is incorrect.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"clang">clang</a></dt>
<dd>Your code does not run with the experimental <tt>clang</tt> compiler. See
(TBA)
for further information.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"clang-bug">clang bug</a></dt>
<dd>You have tickled a bug in <tt>clang</tt> itself. See
(TBA)
for further information.</dd>
<dt><img alt="(transient)" src="/errorlogs/images/green-ball.gif"><a name=
"cluster">cluster</a></dt>
<dd>There was some kind of transient error on the build cluster. It is not your
fault.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"compat6x">compat6x</a></dt>
<dd>This port needs to depend on a port <tt>misc/compat6x</tt>.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"configure">configure error</a></dt>
<dd>The port's <tt>configure</tt> script produced some kind of
error.
(Note: using <tt>clang</tt> as the ports compiler can also trigger this message.)</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"coredump">coredump</a></dt>
<dd>Some process in the build chain dropped core. While your port may indeed
be faulty, the process that dropped core should also be fixed.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"cpusetsize">cpusetsize</a></dt>
<dd>This port needs to catch up with the <tt>cpusetsize</tt> sysctl change in 9-CURRENT.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"dependobj">depend object</a></dt>
<dd>The port is trying to reinstall a dependency that already
exists. This is usually caused by the first field of a
<tt>*_DEPENDS</tt> line (the <tt>obj</tt> of
<tt>obj:dir[:target]</tt>) indicating a file that is not installed
by the dependency, causing it to be rebuilt even though it has
already been <a href="#pkgadd">added from a package</a>.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"dependpkg">depend package</a></dt>
<dd>There was an error during <a href="#pkgadd">adding dependencies
from packages</a>. It is the fault of the package being added, not
this port.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"dirent">dirent</a></dt>
<dd>The port has not caught up with the change to <code>scandir(3)</code>
as committed in FreeBSD version 800501.</dd>
<dt><img alt="(transient)" src="/errorlogs/images/green-ball.gif"><a name=
"df">disk full</a></dt>
<dd>The disk filled up on the build system. It is not your
fault.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"display">DISPLAY</a></dt>
<dd>This port requires an X display to build. There is nothing you
can do about it unless you can somehow make it not require an X
connection.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"distinfo">distinfo update</a></dt>
<dd>The contents of <tt>distinfo</tt> does not match the list of
distfiles or patchfiles.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"fetch">fetch</a></dt>
<dd>One or more of the files could not be fetched.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"fetch-timeout">fetch timeout</a></dt>
<dd>Your fetch process was killed because it took too long. (More
accurately, it did not produce any output for a long time.) Please
put sites with better connectivity near the beginning of
<tt>MASTER_SITES</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"forbidden">forbidden</a></dt>
<dd>Someone has marked this port as "forbidden", almost always due
to security concerns. See the logfile for more information.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"gcc-bug">gcc bug</a></dt>
<dd>You have tickled a bug in gcc itself. See the
<a href="http://www.gnu.org/software/gcc/bugs.html">GNU bug report documentation</a>
for further information.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"gcc4">gcc4</a></dt>
<dd>Your code does not run with the latest gcc version.
See <a href="http://wiki.freebsd.org/gcc4">the wiki page</a>
for further information.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"gmake">gmake</a></dt>
<dd>Your code does not run with the latest, incompatible, gmake version
(3.82).</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"install">install error</a></dt>
<dd>There was an error during installation.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"libdepends">LIB_DEPENDS</a></dt>
<dd>The <tt>LIB_DEPENDS</tt> line specifies a library name
incorrectly. This often happens when a port is upgraded and the
shared library version number changes.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"ld">linker error</a></dt>
<dd>There is a linker error which is caused by something other than
those flagged elsewhere.
(Note: using <tt>clang</tt> as the ports compiler can also trigger this message.)</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"makefile">makefile</a></dt>
<dd>There is an error in the <tt>Makefile</tt>, possibly in the default
targets.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"manpage">manpage</a></dt>
<dd>There is a manpage listed in a <tt>MAN?</tt> macro that does not
exist or is not installed in the right place.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"header">missing header</a></dt>
<dd>There is a missing header file. This is usually caused by
either (1) a missing dependency, or (2) specifying an incorrect
location with <tt>-I</tt> in the compiler command line.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"mtree">mtree</a></dt>
<dd>The port leaves <tt>${PREFIX}</tt> in a state that is not
consistent with the mtree definition after <tt>pkg_delete</tt>. This
usually means some files are missing from <tt>PLIST</tt>. It could
also mean that your installation scripts create files or
directories not properly deleted by the deinstallation scripts.
Another possibility is that your port is deleting some directories
it is not supposed to, or incorrectly modifying some directory's
permission.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"nested_declaration">nested_declaration</a></dt>
<dd>There is a nested declaration in the source code.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"newgcc">new compiler error</a></dt>
<dd>The newest version of gcc in the base does not like the source code. This is
usually due to stricter C++ type checking or changes in register
allocation policy.</dd>
<dt><img alt="(transient)" src="/errorlogs/images/green-ball.gif"><a name=
"nfs">NFS</a></dt>
<dd>There was either a temporary NFS error on the build system
(which is not your fault), or the <tt>WRKSRC</tt> is invalid
(which is your fault).</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"patch">patch</a></dt>
<dd>One or more of the patches failed.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"perl">perl</a></dt>
<dd><tt>perl</tt> is no longer included by default in the base
system, but your port's configuration process depends on it. While
this change helps avoid having a stale version of <tt>perl</tt>
in the base system, it also means that many ports now need to include
<tt>USE_PERL5</tt>.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"plist">PLIST</a></dt>
<dd>There is a missing item in the <tt>PLIST</tt>. Note that this is
often caused by an earlier error that went undetected. In this case,
you should fix the error and also the build process so it will fail
upon an error instead of continuing, since that makes debugging
that much harder.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"process">process failed</a></dt>
<dd>The <tt>make</tt> process terminated unexpectedly, due to
something like a signal 11 or bus error.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"runaway">runaway process</a></dt>
<dd>Your <tt>make package</tt> process was killed because it took
too long. (More accurately, it did not produce any output for a long
time.) It is probably because there is a process spinning in an infinite
loop. Please check the log to determine the exact cause of the
problem.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"segfault">segfault</a></dt>
<dd>Some process in the build chain got a segmentation fault.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"sem_wait">sem_wait</a></dt>
<dd>This port needs to catch up with semaphore changes in 9-CURRENT.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"termios">termios</a></dt>
<dd>This port needs to catch up with the <tt>termios.h</tt> changes in src.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"threads">threads</a></dt>
<dd>There is a linker error which is caused by failing to find one of
the thread libraries.</dd>
<dt><img alt="(transient)" src="/errorlogs/images/green-ball.gif"><a name=
"truncated_distfile">truncated_distfile</a></dt>
<dd>A package node encountered an error during pkg_add. It is not your
fault. Linimon is trying to figure out this problem.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"utmp_x">utmp_x</a></dt>
<dd>This port needs to catch up with the <tt>utmp_x.h</tt> changes in src.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"wrkdir">WRKDIR</a><a name="WRKDIR"></a></dt>
<dd>The port is attempting to change something outside
<tt>${WRKDIR}</tt>. See <a href=
"http://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/porting-wrkdir.html">handbook</a>
for details.</dd>
<dt><img alt="(common)" src="/errorlogs/images/blue-ball.gif"><a name=
"unknown">??? (unknown)</a></dt>
<dd>The automated script cannot even guess what is wrong with your
port. <tt>portmgr</tt> tries to keep the <tt>processonelog</tt> script
reasonably efficient while covering as many errors as possible, but many
errors are not common enough to try to catch.</dd>
</dl>
<p>Here is an <b>alphabetical list of obsolete errors</b>
that used to be detected by the AI script, but are now uncommon enough to
be skipped:</p>
<dl>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"alignment">alignment</a></dt>
<dd>You've managed to confuse the assembler with a misaligned
structure.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"apxs">apxs</a></dt>
<dd>Your port depends on Apache (in particular, the <tt>apxs</tt>
binary) but the <tt>Makefile</tt> doesn't have Apache in
<tt>BUILD_DEPENDS</tt> and/or <tt>LIB_DEPENDS</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"assert">assert</a></dt>
<dd>Compilation failed due to an assert. This is often a variation
on <tt>arch</tt> or <tt>missing header</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"awk">awk</a></dt>
<dd><tt>awk</tt> is complaining about some kind of bogus string
expression.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"bison">bison</a></dt>
<dd>Your port requires <tt>bison</tt>, which does not exist in
4.x-stable or newer anymore. Either patch it to use <tt>byacc</tt>
instead, or define <tt>USE_BISON</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"cgi-bin">cgi-bin</a></dt>
<dd>Your port assumes that a directory (usually
<tt>/usr/local/www/cgi-bin</tt>) already exists,
but by default it doesn't.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"chown">chown</a></dt>
<dd><tt>POSIX</tt> has deprecated the usage
"<tt>chown user.group filename</tt>" in favor of
"<tt>chown user:group filename</tt>". This happened quite some time
ago, actually, but it is only now being enforced. (The change was
made to allow '.' in usernames).</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"elf">ELF</a></dt>
<dd>The port does not properly work in the new ELF world. It is
probably looking for an <tt>a.out</tt> object (e.g.,
<tt>crt0.o</tt>).</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"f77">f77</a></dt>
<dd><tt>gcc</tt> in base no longer includes the <tt>Fortran</tt> compiler
by default.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"ffs_conflict">ffs conflict</a></dt>
<dd>Both <tt>/usr/include/machine/cpufunc.h</tt> and
<tt>/usr/include/strings.h</tt> are attempting to define <tt>int ffs()</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"getopt.h">getopt.h</a></dt>
<dd><tt>&lt;getopt.h&gt;</tt> is conflicting with <tt>unistd.h</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif">
<a name="getopt">getopt</a></dt>
<dd>Your port may need to set the new port variable
<tt>USE_GETOPT_LONG</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"lc_r">libc_r not found</a></dt>
<dd>This library has not yet been ported to e.g. the Sparc.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"malloc.h">malloc.h</a></dt>
<dd>Including <tt>&lt;malloc.h&gt;</tt> is now deprecated in favor of
<tt>&lt;stdlib.h&gt;</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"motif">MOTIF</a></dt>
<dd>This port requires Motif but does not define
<tt>REQUIRES_MOTIF</tt>. See the <a href=
"http://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/porting-motif.html">handbook</a>
for details.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"motiflib">MOTIFLIB</a></dt>
<dd>This port requires Motif but does not refer to the libraries
using <tt>${MOTIFLIB}</tt>. See <a href=
"http://www.freebsd.org/doc/en_US.ISO8859-1/books/porters-handbook/porting-motif.html">handbook</a>
for details.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"munmap">munmap</a></dt>
<dd>POSIX specifies that munmap cannot be called on a zero-length file.
Because of this, during 4.X builds, if cp tries to copy a zero-length file, it
may fail saying, "cp: ...: Invalid argument". This is a problem with the
bindist image on pointyhat, and not the fault of the porter.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"perl5">perl5</a></dt>
<dd>There is a problem in processing a <tt>perl5</tt> module.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"pod2man">pod2man</a></dt>
<dd><tt>perl</tt> is no longer included by default in the base
system, but your port's documentation process depends on it. While
this change helps avoid having a stale version of <tt>perl</tt>
in the base system, it also means that many ports now need to include
<tt>USE_PERL5</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"python">python</a></dt>
<dd>The <tt>Makefile</tt> needs to define <tt>USE_PYTHON</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"sed">sed</a></dt>
<dd><tt>sed</tt> is complaining about some kind of bogus regular
expression, probably as a side-effect of its being invoked by
<tt>${REINPLACE_COMMAND}</tt>. This is often a result of having
replaced usages of <tt>perl</tt> in the <tt>Makefile</tt> with usages
of <tt>${REINPLACE_COMMAND}</tt> but having left
<tt>perl</tt>-specific regexps in place.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"soundcard.h">soundcard.h</a></dt>
<dd><tt>machine/soundcard.h</tt> has been moved.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"stdio">stdio</a></dt>
<dd>You need to bring your port up to date with the current
<tt>&lt;stdio.h&gt;</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"stl">stl</a></dt>
<dd>Your port requires the <tt>STL</tt> library but cannot find it.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"struct">struct changes</a></dt>
<dd>Your port is trying to refer to structure elements that are not
really there. This is often due to changes in the underlying
include files.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"sysvipc">sysvipc</a></dt>
<dd>Your port is interacting badly with the System V InterProcess
Communication code.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"uname">uname</a></dt>
<dd>For a short period of time, gcc was not handling uname properly.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"values.h">values.h</a></dt>
<dd><tt>values.h</tt> has been moved.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"texinfo">texinfo</a></dt>
<dd>The new <tt>makeinfo</tt> cannot process a texinfo source file.
You can probably add a "--<tt>no-validate</tt>" option to force it
through if you are sure it's correct regardless of what
<tt>makeinfo</tt> says.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"wait">union wait</a></dt>
<dd>The compiler could not calculate the storage size of an object,
often due to misuse of a union.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"usexlib">USE_XLIB</a></dt>
<dd>You should specify <tt>USE_XLIB</tt> for this port since it
appears to use X.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"varargs">varargs</a></dt>
<dd><tt>varargs.h</tt> is obsolete with newer versions of <tt>gcc</tt>.</dd>
<dt><img alt="(uncommon)" src="/errorlogs/images/purple-ball.gif"><a name=
"xfree4man">X manpage</a></dt>
<dd>This port does not install a manpage but <tt>imake</tt> rules are
generating commands to convert manpages to HTML format. This is
most likely fixed by changing <tt>ComplexProgramTarget()</tt> in
<tt>Imakefile</tt> to <tt>ComplexProgramTargetNoMan()</tt>. Note that
defining <tt>NO_INSTALL_MANPAGES</tt> in the <tt>Makefile</tt> is no
longer sufficient in XFree86-4.</dd>
</dl>
<hr>
<center><a href="http://www.freebsd.org/ports/">The ports page</a>
| Maintained by <a href="mailto:portmgr@FreeBSD.org">portmgr@FreeBSD.org</a>
| Last modified
<br>
$FreeBSD$
</center>
</body></html>

View File

@ -1,2 +0,0 @@
paddock 3
builder 2

View File

@ -1,29 +0,0 @@
# $FreeBSD$
# sample configuration file for package build
arch=i386
domain=FreeBSD.org
ftpsite=ftp-master.$domain
distfiledir=w/ports/distfiles
packagedir=w/ports/${arch}/tmp
master=pointyhat.freebsd.org
pb=/var/portbuild
user=ports-${arch}
client_user=root
mailto=kris@FreeBSD.org,linimon@FreeBSD.org,pav@FreeBSD.org
scratchpart=/x
scratchdir=${scratchpart}/tmp
branches="6 7 6-exp 7-exp 8 8-exp 9 9-exp"
squid_dir=${scratchpart}/squid
maxjobs=3
sudo_cmd=
use_md_swap=0
md_size=11g
use_jail=1
#ccache_dir=dumpster:/vol/vol3/ccache
#ccache_dir_nfs=1
#
# add your own variables down here to avoid CVS merge conflicts
#

View File

@ -1,56 +0,0 @@
#!/bin/sh
# Run the given command on every node of one architecture.
# Re-sources the per-arch portbuild.conf for each node, then applies any
# per-node overrides (e.g. client_user, sudo_cmd) before the ssh.
doarch() {
    arch=$1
    shift
    if [ ! -f "${pb}/${arch}/portbuild.conf" ]; then
        echo "Invalid arch"
        exit 1
    fi
    . ${pb}/${arch}/portbuild.conf
    for node in $(awk '{print $1}' ${pb}/${arch}/mlist); do
        # reset to the arch defaults, then layer on node-specific config
        . ${pb}/${arch}/portbuild.conf
        if [ -f "${pb}/${arch}/portbuild.${node}" ]; then
            . ${pb}/${arch}/portbuild.${node}
        fi
        if [ ${quiet} -eq 0 ]; then
            echo "[${node}]"
        fi
        su ports-${arch} -c "ssh ${client_user}@${node} ${sudo_cmd} $@"
    done
}
# Entry point: decide which arch(es) to act on, then dispatch to doarch.
# Root passes the arch as the first argument; a ports-<arch> user gets
# the arch derived from the user name.
me=$(whoami)
if [ "${me}" = "root" ]; then
    arch=$1
    shift
    root=1
else
    arch=$(echo ${me} | sed s,ports-,,)
    root=0
fi
# optional -q flag suppresses the per-node "[name]" banner
quiet=0
if [ "$1" = "-q" ]; then
    quiet=1
    shift
fi
pb=/var/portbuild
. ${pb}/conf/server.conf
if [ "${arch}" != "all" ]; then
    doarch $arch "$@"
else
    # "all": run against every arch that has a portbuild.conf
    for conf in $(find ${pb}/*/portbuild.conf); do
        arch=$(basename $(dirname ${conf}))
        doarch $arch "$@"
    done
fi

View File

@ -1,60 +0,0 @@
#!/bin/sh
# Run the given command on every node of one architecture, serialised
# per node through lockf so we do not race with other cluster jobs.
# Reports (but does not abort on) per-node command failures.
doarch() {
    arch=$1
    shift
    if [ ! -f "${pb}/${arch}/portbuild.conf" ]; then
        echo "Invalid arch"
        exit 1
    fi
    . ${pb}/${arch}/portbuild.conf
    for node in $(awk '{print $1}' ${pb}/${arch}/mlist); do
        # reset to the arch defaults, then layer on node-specific config
        . ${pb}/${arch}/portbuild.conf
        if [ -f "${pb}/${arch}/portbuild.${node}" ]; then
            . ${pb}/${arch}/portbuild.${node}
        fi
        if [ ${quiet} -eq 0 ]; then
            echo "[${node}]"
        fi
        # give up after 60s if some other job holds this node's lock
        lockf -t 60 ${pb}/${arch}/lockfiles/lock.${node} su ports-${arch} -c "ssh ${client_user}@${node} ${sudo_cmd} $@"
        status=$?
        if [ ${status} -ne 0 ]; then
            echo "could not execute command $@ on ${node}: ${status}"
        fi
    done
}
# Entry point: same dispatch logic as the unlocked variant -- root names
# the arch explicitly, a ports-<arch> user implies it.
me=$(whoami)
if [ "${me}" = "root" ]; then
    arch=$1
    shift
    root=1
else
    arch=$(echo ${me} | sed s,ports-,,)
    root=0
fi
# optional -q flag suppresses the per-node "[name]" banner
quiet=0
if [ "$1" = "-q" ]; then
    quiet=1
    shift
fi
pb=/var/portbuild
. ${pb}/conf/server.conf
if [ "${arch}" != "all" ]; then
    doarch $arch "$@"
else
    # "all": run against every arch that has a portbuild.conf
    for conf in $(find ${pb}/*/portbuild.conf); do
        arch=$(basename $(dirname ${conf}))
        doarch $arch "$@"
    done
fi

View File

@ -1,54 +0,0 @@
#!/bin/sh
# Generate an HTML report of the logs that appear in BOTH dir1 and dir2
# (i.e. ports that failed in both builds).  Output is written to
# <arch>-<dir1>+<dir2>.html in the current directory.
# (The old header comment claimed "in dir1 but not in dir2", which
# contradicted the actual test below and the page title.)
if [ $# -ne 3 ]; then
    echo "usage: $0 arch dir1 dir2"
    exit 1
fi
here=$(pwd)
arch=$1
dir1=$2
dir2=$3
# the two log directories being compared, e.g. ./i386-8 and ./i386-8-exp
fdir1=$here/${arch}-$dir1
fdir2=$here/${arch}-$dir2
# output file name joins the two directory suffixes with '+'
plus="$(echo $2 $3 | sed -e 's/ /+/g')"
of=$here/$arch-$plus.html
echo "<html><head><title>Logs that are in both $dir1 and $dir2</title>" >$of
echo "<h1>Logs that are in both $dir1 and $dir2</h1>" >>$of
echo "</head><body>" >>$of
cd $fdir1
# normalize .log.bz2 names to .log so compressed and plain logs compare
logs=$(find . -name \*.log -o -name \*.log.bz2 | sed -e 's/\.log\.bz2/\.log/g')
nlogs=$(echo $logs | wc -w)
if [ $nlogs -eq 0 ]; then
    echo "No errors" >>$of;
else
    num=0
    echo "<table border=1>" >>$of
    # two header cells to match the two link columns emitted per row
    echo "<tr><th>Log in $dir1</th><th>Log in $dir2</th></tr>" >>$of
    for i in $logs; do
        # report only logs that also exist (possibly compressed) in dir2
        if [ -f ${fdir2}/${i}.bz2 -o -f ${fdir2}/${i} ]; then
            fname1=$(basename $i .bz2)
            fname=$(basename $fname1 .log)
            echo -n "<tr>" >> $of
            echo -n "<td><a href=\"$arch-$dir1/$fname.log\">$fname</a></td>" >>$of
            echo -n "<td><a href=\"$arch-$dir2/$fname.log\">$fname</a></td>" >>$of
            echo "</tr>" >>$of
            num=$(($num + 1))
        fi
    done
    echo "</table><br>" >> $of
    echo "$num errors<br>" >> $of
fi
echo "<hr>" >> $of
echo "<a href=\"../\">back to top</a>" >> $of
echo "</body></html>" >>$of

View File

@ -1,599 +0,0 @@
#!/bin/sh
# $FreeBSD$
# server-side script to handle various commands common to builds
# configurable variables
pb=/var/portbuild
# subdirectories to process. yes, this is a hack, but it saves code
# duplication.
# Each entry is quoted so that the empty string (the build root itself)
# survives word-splitting in the loops that consume this list; consumers
# strip the quotes with sed.
quoted_subdirs="'/src' '/ports' ''"
# XXX unused
# Print the name of the most recent snapshot under the given filesystem
# (zfs lists snapshots oldest-first, so the last line is the newest).
get_latest_snap() {
    snaproot=$1
    zfs list -rHt snapshot ${snaproot} | tail -1 | awk '{print $1}'
}
# Timestamp used for build IDs and snapshot names: YYYYMMDDhhmmss.
now() {
    date "+%Y%m%d%H%M%S"
}
# List the active build IDs for arch/branch, and if the "latest" symlink
# points at a live directory, report which build it resolves to.
do_list() {
    arch=$1
    branch=$2
    buildsroot=/var/portbuild/${arch}/${branch}/builds
    if [ ! -d ${buildsroot} ]; then
        echo "No such build environment ${arch}/${branch}"
        exit 1
    fi
    # build directories are timestamps, so they all begin with "2"
    echo "The following builds are active:"
    echo $(cd ${buildsroot}; ls -1d 2* 2> /dev/null)
    if [ -L ${buildsroot}/latest -a -d ${buildsroot}/latest/ ]; then
        target=$(readlink ${buildsroot}/latest)
        target=${target%/}
        echo "Latest build is: ${target##*/}"
    fi
}
# Create a brand-new build for arch/branch: set up the archive and
# builds directories, create a dedicated ZFS filesystem for the build,
# populate its ports and src trees from the master snapshots, and wire
# up the "latest" symlink plus the webserver error-log links.
# $1 = arch, $2 = branch, $3 = buildid; remaining args are passed to
# the *_inner populate helpers.
# Globals used: pb, pbab, ZFS_VOLUME (from the sourced config files).
do_create() {
arch=$1
branch=$2
buildid=$3
shift 3
archivedir=${pb}/${arch}/archive
# create directory for all build logs
buildlogsdir=${archivedir}/buildlogs
if [ ! -d ${buildlogsdir} ]; then
mkdir -p ${buildlogsdir} || exit 1
chown -R ports-${arch}:portmgr ${archivedir}
chmod -R g+w ${archivedir}
fi
# create directory for all builds for buildenv
buildsdir=${pbab}/builds
if [ ! -d ${buildsdir} ]; then
mkdir -p ${buildsdir} || exit 1
chown -R ports-${arch}:portmgr ${pbab}
chmod -R g+w ${pbab}
fi
# create directory for latest build for buildenv
builddir=$(realpath ${buildsdir})/${buildid}
if [ -d ${builddir} ]; then
echo "Can't create ${builddir}, it already exists"
exit 1
fi
# create zfs instance for latest build on buildenv
mountpoint=${builddir}
newfs=${ZFS_VOLUME}/portbuild/${arch}/${buildid}
zfs create -o mountpoint=${mountpoint} ${newfs} || exit 1
chown -R ports-${arch}:portmgr ${mountpoint}
chmod -R g+w ${mountpoint}
# populate ports for latest build on buildenv
do_portsupdate_inner ${arch} ${branch} ${buildid} ${builddir} $@
# populate src for latest build on buildenv
do_srcupdate_inner ${arch} ${branch} ${buildid} ${builddir} $@
# create the link for building packages for latest build
ln -sf ${builddir} ${pbab}/builds/latest
# create the links for the webserver
# XXX MCL hardcoding
# NOTE(review): these ln -s calls (no -f) fail if the links already
# exist -- presumably this only runs once per arch/branch; confirm.
errorlogs=/var/portbuild/errorlogs
ln -s ${buildsdir}/latest/bak/errors ${errorlogs}/${arch}-${branch}-previous
ln -s ${buildsdir}/latest/bak/logs ${errorlogs}/${arch}-${branch}-previous-logs
ln -s ${buildsdir}/latest/errors ${errorlogs}/${arch}-${branch}-latest
ln -s ${buildsdir}/latest/logs ${errorlogs}/${arch}-${branch}-latest-logs
ln -s ${buildsdir}/latest/bak/packages ${errorlogs}/${arch}-${branch}-packages-previous
ln -s ${buildsdir}/latest/packages ${errorlogs}/${arch}-${branch}-packages-latest
echo "New build ID is ${buildid}"
# Clone an existing build into a new build ID via ZFS snapshot+clone,
# then promote the clone so the new build becomes the head of the clone
# chain.  The ports and src sub-filesystems are cloned the same way when
# they exist.  Finally rotates the latest/previous symlinks, destroying
# the old "previous" build first unless it is pinned with a .keep file.
# $1 = arch, $2 = branch, $3 = source buildid, $4 = source builddir,
# optional $5 = new build ID (defaults to a fresh timestamp).
do_clone() {
arch=$1
branch=$2
buildid=$3
builddir=$4
shift 4
if [ "$#" -gt 0 ]; then
newid=$1
shift
else
newid=$(now)
fi
# new build directory is a sibling of the source build directory
tmp=$(realpath ${builddir})
tmp=${tmp%/}
newbuilddir="${tmp%/*}/${newid}"
oldfs=${ZFS_VOLUME}/portbuild/${arch}/${buildid}
newfs=${ZFS_VOLUME}/portbuild/${arch}/${newid}
zfs snapshot ${oldfs}@${newid}
zfs clone ${oldfs}@${newid} ${newfs}
zfs set mountpoint=${newbuilddir} ${newfs}
# promote so the new filesystem owns the snapshot history
zfs promote ${newfs}
if zfs list -H -t filesystem ${oldfs}/ports 2> /dev/null; then
portsnap=${oldfs}/ports@${newid}
zfs snapshot ${portsnap}
zfs clone ${portsnap} ${newfs}/ports
zfs promote ${newfs}/ports
fi
if zfs list -H -t filesystem ${oldfs}/src 2> /dev/null; then
srcsnap=${oldfs}/src@${newid}
zfs snapshot ${srcsnap}
zfs clone ${srcsnap} ${newfs}/src
zfs promote ${newfs}/src
fi
# rotate symlinks: old "previous" is destroyed (unless .keep exists),
# "latest" becomes "previous", the clone becomes "latest"
if [ -d ${newbuilddir} ]; then
if [ ! -f ${pbab}/builds/previous/.keep ]; then
/var/portbuild/scripts/build destroy ${arch} ${branch} previous
fi
rm -f ${pbab}/builds/previous
mv ${pbab}/builds/latest ${pbab}/builds/previous
ln -sf ${newbuilddir} ${pbab}/builds/latest
fi
echo "New build ID is ${newid}"
}
# Refresh the ports tree of an existing build: destroy the current
# ports clone, then (unless "-umount" was given) reimage it from the
# latest master ports snapshot.
# $1 = arch, $2 = branch, $3 = buildid, $4 = builddir; an optional
# extra argument "-umount" means "just tear down, do not repopulate".
do_portsupdate() {
arch=$1
branch=$2
buildid=$3
builddir=$4
shift 4
# Reset arg: sh variables are global, so a stale value left over from
# another function or the environment must not be able to trigger the
# "-umount" early return below.
arg=
if [ $# -gt 0 ]; then
arg=$1
shift
fi
destroy_fs ${ZFS_VOLUME}/portbuild/${arch} ${buildid} /ports || exit 1
if [ "${arg}" = "-umount" ]; then
return
fi
echo
do_portsupdate_inner ${arch} ${branch} ${buildid} ${builddir} $@
}
# Populate ${builddir}/ports: snapshot the master ports tree and clone
# that snapshot into this build's ZFS hierarchy.
do_portsupdate_inner() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4
    banner="================================================"
    echo "${banner}"
    echo "Reimaging ZFS ports tree on ${builddir}/ports"
    echo "${banner}"
    stamp=$(now)
    mastersnap=${ZFS_VOLUME}/${SNAP_PORTS_DIRECTORY}/ports@${stamp}
    portsfs=${ZFS_VOLUME}/portbuild/${arch}/${buildid}/ports
    zfs snapshot ${mastersnap}
    zfs clone ${mastersnap} ${portsfs}
    zfs set mountpoint=${builddir}/ports ${portsfs}
}
# Refresh the src tree of an existing build: destroy the current src
# clone, then (unless "-umount" was given) reimage it from the latest
# master src snapshot for this branch.
# $1 = arch, $2 = branch, $3 = buildid, $4 = builddir; an optional
# extra argument "-umount" means "just tear down, do not repopulate".
do_srcupdate() {
arch=$1
branch=$2
buildid=$3
builddir=$4
shift 4
# Reset arg: sh variables are global, so a stale value left over from
# another function or the environment must not be able to trigger the
# "-umount" early return below.
arg=
if [ $# -gt 0 ]; then
arg=$1
shift
fi
destroy_fs ${ZFS_VOLUME}/portbuild/${arch} ${buildid} /src || exit 1
if [ "${arg}" = "-umount" ]; then
return
fi
echo
do_srcupdate_inner ${arch} ${branch} ${buildid} ${builddir} $@
}
# Populate ${builddir}/src from the snapshot tree for this branch's
# base version (e.g. branch "8-exp" uses the "8" src snapshots).
do_srcupdate_inner() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4
    banner="================================================"
    echo "${banner}"
    echo "Reimaging ZFS src tree on ${builddir}/src"
    echo "${banner}"
    # strip any "-exp" or dotted suffix to get the base branch number
    strippedbranch=${branch%%[-\.]*}
    stamp=$(now)
    mastersnap=${ZFS_VOLUME}/${SNAP_SRC_DIRECTORY_PREFIX}${strippedbranch}/src@${stamp}
    srcfs=${ZFS_VOLUME}/portbuild/${arch}/${buildid}/src
    zfs snapshot ${mastersnap}
    zfs clone ${mastersnap} ${srcfs}
    zfs set mountpoint=${builddir}/src ${srcfs}
}
# Clean up one client node for the given build: kills off running
# builds and resets chroot state via dosetupnode.
# $1 = arch, $2 = branch, $3 = buildid, $4 = machine,
# $5 = extra arg (assigned below but currently never used).
cleanup_client() {
arch=$1
branch=$2
buildid=$3
mach=$4
arg=$5
# XXX use same exclusion protocol as claim-chroot
echo "Started cleaning up ${arch}/${branch} build ID ${buildid} on ${mach}"
# pick up per-node configuration overrides when present
test -f ${pb}/${arch}/portbuild.${mach} && . ${pb}/${arch}/portbuild.${mach}
# Kill off builds and clean up chroot
${pb}/scripts/dosetupnode ${arch} ${branch} ${buildid} ${mach} -nocopy -queue -full
echo "Finished cleaning up ${arch}/${branch} build ID ${buildid} on ${mach}"
}
# Clean up every client node listed in mlist, in parallel, and block
# until all of the background cleanups have finished.
do_cleanup() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    arg=$5
    shift 5
    for node in $(cat ${pb}/${arch}/mlist); do
        cleanup_client ${arch} ${branch} ${buildid} ${node} ${arg} &
    done
    # every cleanup_client runs in the background; wait for them all
    wait
}
# upload: placeholder -- package uploading is not implemented yet, so
# this always reports failure.
do_upload() {
    arch=$1
    branch=$2
    buildid=$3
    builddir=$4
    shift 4
    echo "Not implemented yet"
    exit 1
}
# Succeed (exit status 0) iff the named ZFS filesystem exists.
test_fs() {
    zfs list -Ht filesystem $1 > /dev/null 2>&1
}
# Print the name of the lexicographically-last clone of the given
# filesystem (build IDs are timestamps, so "last" means "newest").
# Prints nothing when the filesystem has no clones.
get_latest_child() {
local fs=$1
# Return the child of this filesystem with lexicographically
# highest name
#
# XXX if a filesystem is cloned into a different prefix
# (e.g. different arch) then we may not get the most recent one
# but that should not happen.
# Scan every filesystem's origin property; entries whose origin
# snapshot lives on ${fs} are its clones.  The while loop runs in a
# subshell, so the winner is echoed from inside it rather than
# assigned to a caller-visible variable.
zfs get -H -o name,value origin | grep ${fs} | sort | \
(while read zfs origin; do
if [ "${origin%@*}" = "${fs}" ]; then
child=${zfs}
fi
done; echo ${child})
}
# Print the origin snapshot (clone parent) of the given filesystem.
# Prints nothing when the filesystem has no origin ("-") or when the
# origin is one of the master snapshots under ${SNAP_DIRECTORY} -- those
# are not considered build parents.
get_parent() {
local fs=$1
# Check whether this filesystem has a parent
# The read/case runs in a subshell, so the result is echoed out.
zfs get -H -o value origin ${fs} | \
(read snap;
case "${snap}" in
-|${ZFS_VOLUME}/${SNAP_DIRECTORY}/*)
;;
*)
parent=${snap}
;;
esac; echo ${parent})
}
# Destroy one ZFS filesystem belonging to a build, handling clone-tree
# bookkeeping.  A leaf (no clones) is destroyed directly; the root of a
# clone tree first promotes its newest child so the history moves there;
# a filesystem in the middle of a clone chain is refused.
# $1 = base fs (e.g. <vol>/portbuild/<arch>), $2 = buildid,
# $3 = sub-filesystem suffix ("", "/ports" or "/src").
# Returns nonzero on refusal or on any zfs failure; silently succeeds
# when the filesystem does not exist at all.
destroy_fs() {
fs=$1
buildid=$2
subfs=$3
fullfs=${fs}/${buildid}${subfs}
if test_fs "${fullfs}"; then
# We can destroy a leaf filesystem (having no dependent
# clones) with no further effort. However if we are
# destroying the root of the clone tree then we have to
# promote a child to be the new root.
#
# XXX In principle we might have to iterate until we end up as
# a leaf but I don't know if this can happen.
echo "Filesystem ${fullfs}"
child=$(get_latest_child ${fullfs})
parent=$(get_parent ${fullfs})
echo "Filesystem has parent ${parent}"
if [ -z "${child}" ]; then
echo "Filesystem is a leaf"
echo
else
echo "Filesystem has latest child ${child}"
# Check whether filesystem is root
if [ -z "${parent}" ]; then
echo "Filesystem is root; promoting ${child}"
zfs promote ${child}
# after the promote our origin points at the promoted child
parent=$(get_parent ${fullfs})
echo "New parent is ${parent}"
echo
else
# mid-chain: refuse, the caller must destroy the child first
echo "Filesystem has parent ${parent} and cannot be destroyed"
echo
return 1
fi
fi
# We might have snapshots on the target filesystem, e.g. if it
# is both the head and tail of its clone tree. They should be
# unreferenced.
# We have to grep because zfs list -H returns an error instead of
# a null list if no snapshots exist
if ! (zfs list -r -H -o name -t snapshot ${fullfs} | grep "^${fullfs}@" | xargs -n 1 zfs destroy); then
return 1
fi
# The target filesystem should now be unreferenced
if ! zfs destroy -f "${fullfs}"; then
return 1
fi
# Destroy the origin snapshot, which should be unreferenced
if [ ! -z "${parent}" ]; then
if ! zfs destroy -f ${parent}; then
return 1
fi
fi
fi
}
# Destroy a build: its src and ports clones, then the build root
# filesystem, and finally the (now empty) build directory.
# $1 = arch, $2 = branch, $3 = buildid, $4 = builddir.
do_destroy() {
arch=$1
branch=$2
buildid=$3
builddir=$4
shift 4
echo
# re-resolve in case the caller passed a symlinked id such as "latest"
buildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
if [ -z "${buildid}" ]; then
echo "Invalid build ID ${buildid}"
exit 1
fi
# perform sanity check to prevent e.g. "latest" being destroyed
# before "previous"
# NOTE(review): this loop only prints a warning; the hard refusal
# happens in destroy_fs below, which fails for mid-chain filesystems.
for quoted_subdir in $quoted_subdirs; do
fullfs=${ZFS_VOLUME}/portbuild/${arch}/${buildid}`echo $quoted_subdir | sed -e "s/'//g"`
if test_fs "${fullfs}"; then
child=$(get_latest_child ${fullfs})
parent=$(get_parent ${fullfs})
if [ ! -z "${child}" -a ! -z "${parent}" ]; then
echo "Filesystem ${fullfs} has parent ${parent} and child ${child}"
echo "and thus cannot be destroyed. Run 'build destroy' on the child, first."
fi
fi
done
# now ok to do the actual destroy.
for quoted_subdir in $quoted_subdirs; do
destroy_fs ${ZFS_VOLUME}/portbuild/${arch} ${buildid} `echo $quoted_subdir | sed -e "s/'//g"` || exit 1
done
rmdir ${builddir}
}
# Run a command as root if running as user
# Authentication and command validation is taken care of by buildproxy
# $1 = cmd, $2 = arch, $3 = branch, $4 = buildid, $5 = builddir;
# remaining args are forwarded.  Note that builddir is NOT sent through
# the proxy: the re-invoked build script recomputes it from the buildid.
# Always exits with the command's status.
proxy_root() {
cmd=$1
arch=$2
branch=$3
buildid=$4
builddir=$5
shift 5
args=$@
id=$(id -u)
if [ ${id} != "0" ]; then
/var/portbuild/scripts/buildproxy-client "build ${cmd} ${arch} ${branch} ${buildid} ${args}"
error=$?
# 254 marks a failure of the proxy itself, not of the command
if [ ${error} -eq 254 ]; then
echo "Proxy error"
fi
else
# already root: call the implementation function directly
eval "do_${cmd} ${arch} ${branch} ${buildid} ${builddir} ${args}"
error=$?
fi
exit ${error}
}
# Run a command as the ports-${arch} user if root
# When root, re-executes this script under su as ports-<arch>;
# otherwise calls the do_<cmd> implementation directly.  Always exits
# with the command's status.
proxy_user() {
cmd=$1
arch=$2
branch=$3
buildid=$4
builddir=$5
shift 5
args=$@
id=$(id -u)
if [ ${id} != "0" ]; then
# builddir is quoted because it may be empty (e.g. cleanup of a
# destroyed build)
eval "do_${cmd} ${arch} ${branch} ${buildid} \"${builddir}\" ${args}"
error=$?
else
su ports-${arch} -c "/var/portbuild/scripts/build ${cmd} ${arch} ${branch} ${buildid} \"${builddir}\" ${args}"
error=$?
fi
exit ${error}
}
# Print the command synopsis and abort.
usage () {
    echo "usage: build <command> <arch> <branch> [<buildid>] [<options> ...]"
    exit 1
}
##################
# Main dispatch: build <command> <arch> <branch> [<buildid>] [options]
if [ $# -lt 3 ]; then
usage
fi
cmd=$1
arch=$2
branch=$3
shift 3
# load cluster-wide config, per-arch config, and the buildenv helpers
. ${pb}/conf/server.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv
pbab=${pb}/${arch}/${branch}
validate_env ${arch} ${branch} || exit 1
# Not every command requires a buildid as arg
if [ $# -ge 1 ]; then
buildid=$1
shift 1
# Most commands require a buildid that is valid on the server. The
# exception is "cleanup" which is cleaning up a client build that may
# already be destroyed on the server.
case "$cmd" in
cleanup)
# Resolve symlinks but don't bail if the build doesn't exist.
newbuildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
if [ ! -z "${newbuildid}" -a "${newbuildid}" != "${buildid}" ]; then
echo "Resolved ${buildid} to ${newbuildid}"
buildid=${newbuildid}
builddir=$(realpath ${pbab}/builds/${buildid}/)
# We can't rely on buildenv for this code path
fi
;;
create)
# XXX some way to avoid the latest/previous dance?
# default / "latest" gets a fresh timestamped ID; "previous" is
# reserved by the rotation in do_clone
if [ -z "${buildid}" -o "${buildid}" = "latest" ]; then
buildid=$(now)"."`hostname -s`
elif [ "${buildid}" = "previous" ]; then
echo "Use build clone latest instead"
exit 1
else
buildid=${buildid%/}
fi
# We can't rely on buildenv for this code path
;;
*)
# all other commands need an existing, resolvable build
newbuildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
if [ -z "${newbuildid}" ]; then
echo "Build ID ${buildid} does not exist"
exit 1
fi
if [ ${newbuildid} != ${buildid} ]; then
echo "Resolved ${buildid} to ${newbuildid}"
buildid=${newbuildid}
fi
builddir=$(realpath ${pbab}/builds/${buildid}/)
buildenv ${pb} ${arch} ${branch} ${builddir}
;;
esac
fi
# Unprivileged commands
# (each branch forwards through proxy_root/proxy_user, which switch to
# the appropriate uid and then invoke the matching do_<cmd> function)
case "$cmd" in
list)
do_list ${arch} ${branch} $@ || exit 1
;;
create)
if [ -z "${buildid}" ]; then
usage
fi
proxy_root create ${arch} ${branch} ${buildid} $@ || exit 1
;;
clone)
if [ -z "${buildid}" ]; then
usage
fi
proxy_root clone ${arch} ${branch} ${buildid} ${builddir} $@ || exit 1
;;
portsupdate)
if [ -z "${buildid}" ]; then
usage
fi
proxy_root portsupdate ${arch} ${branch} ${buildid} ${builddir} $@ || exit 1
;;
srcupdate)
if [ -z "${buildid}" ]; then
usage
fi
proxy_root srcupdate ${arch} ${branch} ${buildid} ${builddir} $@ || exit 1
;;
cleanup)
if [ -z "${buildid}" ]; then
usage
fi
# builddir may be null if cleaning up a destroyed build
proxy_user cleanup ${arch} ${branch} ${buildid} "${builddir}" $@ || exit 1
;;
upload)
if [ -z "${buildid}" ]; then
usage
fi
proxy_user upload ${arch} ${branch} ${buildid} ${builddir} $@ || exit 1
;;
destroy)
if [ -z "${buildid}" ]; then
usage
fi
proxy_root destroy ${arch} ${branch} ${buildid} ${builddir} $@ || exit 1
;;
*)
echo "build: invalid command: $cmd"
exit 1
;;
esac

View File

@ -1,170 +0,0 @@
#!/bin/sh
# $FreeBSD$
#
# Set up the build variables which are used by a given build. Some
# of the code here is common to both clients and server; some is
# particular to each.
# get the major branch number. only used on server side.
# Print the major branch number of $1 (e.g. "8-exp" -> "8"); prints
# nothing when the stripped value does not match SRC_BRANCHES_PATTERN.
# Only used on the server side.
get_branch_base() {
	stripped=${1%%[-\.]*}
	echo `echo ${stripped} | grep ${SRC_BRANCHES_PATTERN}`
}
# only used on server side
# Check that $1 is a supported architecture and $2 a recognized branch.
# Prints a diagnostic and returns non-zero on failure.  Leaves
# ${branchbase} set for the caller.  Only used on the server side.
validate_env() {
	arch=$1
	branch=$2
	found=0
	for supported in ${SUPPORTED_ARCHS}; do
		if [ ${supported} = ${arch} ]; then
			found=1
			break
		fi
	done
	if [ ${found} -eq 0 ]; then
		echo "Invalid arch: ${arch}"
		return 1
	fi
	branchbase=$(get_branch_base ${branch})
	if [ -z "${branchbase}" ]; then
		echo "Invalid branch: ${branch}"
		return 1
	fi
	return 0
}
# only used on server side
# Resolve a possibly symlinked buildid (e.g. "latest") to the name of
# the physical build directory it points at; returns non-zero when no
# such build exists.  Only used on the server side.
resolve() {
	pb=$1
	arch=$2
	branch=$3
	buildid=$4
	builddir=${pb}/${arch}/${branch}/builds/${buildid}/
	if [ ! -d ${builddir} ]; then
		return 1
	fi
	# realpath follows the symlink; print the final path component
	builddir=$(realpath ${builddir})
	builddir=${builddir%/}
	echo ${builddir##*/}
}
# derive the source tree metadata and export it. common to both client and server.
# Derive OSVERSION, OSREL and BRANCH from the src tree rooted at $1 and
# export them.  Common to both client and server.  Missing files are
# reported but not fatal.
export_src_metadata() {
	src_base=$1
	param_h=${src_base}/sys/sys/param.h
	newvers=${src_base}/sys/conf/newvers.sh
	if [ -f ${param_h} ]; then
		export OSVERSION=$(awk '/^#define __FreeBSD_version/ {print $3}' < ${param_h})
	else
		echo "export_src_metadata: couldn't find ${param_h}!"
	fi
	if [ -f ${newvers} ]; then
		export OSREL=$(awk 'BEGIN {FS="\""}; /^REVISION/ {print $2}' < ${newvers})
		export BRANCH=$(awk 'BEGIN {FS="\""}; /^BRANCH/ {print $2}' < ${newvers})
	else
		echo "export_src_metadata: couldn't find ${newvers}!"
	fi
}
#
# establish commonly-used environment variables (server-side)
#
# Export the environment a server-side build (INDEX, duds, etc.) needs.
# Arguments: pb arch branch builddir.  Exits on an invalid branch.
buildenv () {
pb=$1
arch=$2
branch=$3
builddir=$4
# set up things for INDEX/duds builds
# first, don't pick up host OPTIONS
export PORT_DBDIR=/nonexistent
# Have to use realpath because 'make index' doesn't deal with
# symlinks in PORTSDIR - kk 020311
if [ -d ${builddir}/ports/ ]; then
export PORTSDIR=$(realpath ${builddir}/ports)
else
export PORTSDIR=/nonexistent
fi
if [ -d ${builddir}/src/ ]; then
export SRC_BASE=$(realpath ${builddir}/src)
else
export SRC_BASE=/nonexistent
fi
# sets OSVERSION/OSREL/BRANCH from the tree just selected
export_src_metadata ${SRC_BASE}
# for archs that support COMPAT_IA32, set some flags for INDEX.
# Client kernels should be built appropriately.
for i in ${ARCHS_SUPPORTING_COMPAT_IA32}; do
if [ ${i} = ${arch} ]; then
export HAVE_COMPAT_IA32_KERN="yes"
export HAVE_COMPAT_IA32_LIBS="yes"
break
fi
done
for i in ${ARCHS_REQUIRING_LINPROCFS}; do
if [ ${i} = ${arch} ]; then
export LINUX_OSRELEASE=${DEFAULT_LINUX_OSRELEASE}
break
fi
done
buildenv.common
# override things destined for bsd.port.mk
export DISTDIR=${builddir}/distfiles
export PACKAGES=${builddir}/packages
branchbase=$(get_branch_base ${branch})
if [ -z "${branchbase}" ]; then
echo "buildenv: invalid branch ${branch}"
exit 1
else
export INDEXFILE=INDEX-${branchbase}
fi
# probably only used in mkbindist
export __MAKE_CONF=${pb}/${arch}/make.conf
}
#
# establish commonly-used environment variables (client-side)
#
# Client-side build environment: populate OSREL/OSVERSION/BRANCH from
# the src tree at $1, then force the uname(1) overrides so builds
# report the target system rather than the build host.
buildenv.client() {
	export_src_metadata $1
	export UNAME_s=FreeBSD
	export UNAME_n=freebsd.org
	export UNAME_m=${ARCH}
	export UNAME_p=${ARCH}
	export UNAME_r=${OSREL}-${BRANCH}
	export UNAME_v="FreeBSD ${OSREL}-${BRANCH} #0: $(date) portmgr@freebsd.org:/usr/src/sys/magic/kernel/path"
}
#
# establish commonly-used environment variables (common to clients and server)
#
# Environment flags shared by client and server package builds.
buildenv.common() {
	export ARCH=${arch}
	export MACHINE_ARCH=${arch}
	# non-interactive build of a package
	export BATCH=1
	export PACKAGE_BUILDING=1
}

View File

@ -1,64 +0,0 @@
#!/bin/sh
#
# buildfailure <arch> <branch> <buildid> <pkgname>
#
# Record a failed package build in the per-branch 'failure' database
# (portdir|pkgbase|pkgname|first-fail|last-fail|count) and link the
# error log into the 'latest' tree.
# bail out, discarding a partially written failure file
cleanup() {
echo "Problem writing new failure file!"
rm -f failure.new
exit 1
}
# configurable variables
pb=/var/portbuild
usage () {
echo "usage: buildfailure arch branch buildid pkgname"
exit 1
}
if [ $# -ne 4 ]; then
usage
fi
arch=$1
branch=$2
buildid=$3
pkgname=$4
shift 4
builddir=${pb}/${arch}/${branch}/builds/${buildid}
. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv
buildenv ${pb} ${arch} ${branch} ${builddir}
# Don't pick up installed packages from the host
export LOCALBASE=/nonexistentlocal
index=${PORTSDIR}/${INDEXFILE}
# map the package name back to its port directory via the INDEX
portloc=$(grep "^$pkgname|" ${index} | cut -f 2 -d \| | sed s,/usr/ports/,,)
pkgbase=$(cd ${PORTSDIR}/${portloc}/ && make -V PKGBASE)
cd ${pb}/${arch}/${branch}
entry=$(grep "^${portloc}|" failure)
date=$(date +%s)
# NOTE: with IFS='|' the unquoted $entry expansion below rejoins the
# pipe-separated fields with spaces, so 'cut -d " "' extracts them.
IFS='|'
if [ ! -z "$entry" ]; then
# existing failure: bump the count, keep the first-failure date
count=$(echo $entry | cut -f 6 -d \ )
olddate=$(echo $entry | cut -f 4 -d \ )
(grep -v "^${portloc}|" failure > failure.new) || cleanup
(echo "${portloc}|${pkgbase}|${pkgname}|${olddate}|${date}|$((${count}+1))" >> failure.new) || cleanup
mv failure.new failure
else
(echo "${portloc}|${pkgbase}|${pkgname}|${date}|${date}|1" >> failure) || cleanup
fi
# expose the newest error log under latest/<portdir>
link=${pb}/${arch}/${branch}/latest/${portloc}
mkdir -p $(dirname ${link})
errorloc=$(realpath ${builddir}/errors/${pkgname}.log)
ln -sf ${errorloc} ${link}

View File

@ -1,88 +0,0 @@
#!/usr/bin/env python
# $FreeBSD$
#
# Allow access to privileged build commands to ports-* users for
# managing their own build spaces.
#
# NOTE: Python 2 code ('commands' module, old-style prints elsewhere).

import sys, socket, os, commands
from freebsd import *
from freebsd_config import *

# Location of the server configuration file.
CONFIG_DIR="/var/portbuild"
CONFIG_SUBDIR="conf"
CONFIG_FILENAME="server.conf"

# Subcommands a ports-<arch> user may run through the proxy.
valid_cmds = ['create', 'clone', 'portsupdate', 'srcupdate', 'destroy']
def validate(uid, arch):
    """Return True when uid is root or the ports-<arch> build user."""
    return uid == 0 or getuidbyname("ports-%s" % arch) == uid
def process(cmd, sockfile):
    """Validate and run one proxied 'build' command.

    Returns a (status, output) tuple; 254 signals a proxy-level error.
    Relies on the module-global 'uid' set by the accept loop.
    """
    if len(cmd) < 5:
        return (254, "Wrong number of arguments")
    if cmd[0] != "build":
        return (254, "Invalid command")
    try:
        allowed = validate(uid, cmd[2])
    except:
        return (254, "Internal error")
    if not allowed:
        return (254, "Permission denied")
    if cmd[1] not in valid_cmds:
        return (254, "Permission denied")
    # Reject shell metacharacters before interpolating into a command line.
    for word in cmd:
        for ch in word:
            if not ch.isalnum() and ch not in "-_.":
                return (254, "Illegal characters in input")
    (status, out) = commands.getstatusoutput("/var/portbuild/scripts/build %s" % " ".join(cmd[1:]))
    return (status, out)
config = getConfig( CONFIG_DIR, CONFIG_SUBDIR, CONFIG_FILENAME )
BUILDPROXY_SOCKET_FILE = config.get( 'BUILDPROXY_SOCKET_FILE' )

# Replace any stale socket left over from a previous run.
if os.path.exists(BUILDPROXY_SOCKET_FILE):
    os.unlink(BUILDPROXY_SOCKET_FILE)

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(BUILDPROXY_SOCKET_FILE)
# rw for owner and the portmgr group only
os.chmod(BUILDPROXY_SOCKET_FILE, 0660)
os.chown(BUILDPROXY_SOCKET_FILE, -1, getgidbyname('portmgr'))
s.listen(10)

# Serve forever: one command per connection, authenticated by the
# peer's uid as reported by the kernel (getpeerid).
while True:
    try:
        (conn, addr) = s.accept()
        (uid, gids) = getpeerid(conn)
        sockfile = conn.makefile()
        cmd = sockfile.readline().rstrip().split()
        print cmd
        try:
            (status, out) = process(cmd, sockfile)
        except:
            (status, out) = (254, "Internal error")
        # Reply protocol: status line followed by raw output.
        sockfile.write("%d\n" % status)
        sockfile.write(out)
        sockfile.flush()
        sockfile.close()
        conn.close()
    except Exception, e:
        # Never let a bad client take the proxy down.
        print "buildproxy: exception: " + str( e )
        pass

View File

@ -1,47 +0,0 @@
#!/usr/bin/env python
# $FreeBSD$
#
# Client for communicating proxy requests to the buildproxy
#
# NOTE: Python 2 code.  Sends argv as one line over the proxy's UNIX
# socket, prints the returned output and exits with the returned status.

import sys, socket, os, commands
from freebsd import *
from freebsd_config import *

CONFIG_DIR="/var/portbuild"
CONFIG_SUBDIR="conf"
CONFIG_FILENAME="server.conf"

config = getConfig( CONFIG_DIR, CONFIG_SUBDIR, CONFIG_FILENAME )
BUILDPROXY_SOCKET_FILE = config.get( 'BUILDPROXY_SOCKET_FILE' )

try:
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect(BUILDPROXY_SOCKET_FILE)
    sockfile = s.makefile()
    sockfile.write("%s\n" % " ".join(sys.argv[1:]))
    sockfile.flush()
    # Reply protocol: status line followed by raw output.
    code = sockfile.readline().strip()
    out = "".join(sockfile.readlines())
    if out:
        print out
    sockfile.close()
    s.close()
    sys.exit(int(code))
except Exception, e:
    print "buildproxy-client: exception:"
    print e
    # 'code' may be unbound if the failure happened before the reply;
    # the nested try reports whichever case applies.
    try:
        if code == None:
            print "buildproxy-client: error: code was None"
        else:
            print "buildproxy-client: error: code was '" + code + "'"
    except Exception, e2:
        print "buildproxy-client: exception 2:"
        print e2
    raise e # XXX debug
    sys.exit(254)

View File

@ -1,432 +0,0 @@
#!/bin/sh
# $FreeBSD$
# client-side script to actually build a package
# usage: $0 DIRNAME PHASE
# PHASE is 1 (checksum) or 2 (package)
# Abort the build: optionally drop distfiles, archive WRKDIR when a
# .keep marker exists, dump the failing phase's log, and mark the
# chroot dirty.  $1 is the failing phase number (0 = no log to dump).
# Always exits 0; /tmp/status carries the real result.
cleanup() {
status=$1
# Don't keep distfiles if 'make checksum' failed
keep_distfiles=$(make -V ALWAYS_KEEP_DISTFILES)
if [ ${status} -eq 1 -o -z "${keep_distfiles}" ]; then
cd ${dir}
distdir=$(make -V DISTDIR)
if [ ! -z "${distdir}" ]; then
rm -rf ${distdir}/*
fi
fi
if [ -e ${dir}/.keep ]; then
cd ${dir}
objdir=$(make -V WRKDIR)
tar cfjC /tmp/work.tbz ${objdir}/.. work
fi
if [ ${status} -gt 0 ]; then
cat /tmp/make.log${status}
fi
echo 1 > /tmp/status
touch /.dirty
echo "================================================================"
echo -n "build of ${dir} ended at "
date
exit 0
}
# Install the listed dependency packages from /tmp/depends with
# pkg_add, skipping any already registered.  A failing dependency
# aborts the whole build via cleanup 0.
add_pkg() {
pkgs=$*
echo add_pkg $pkgs
cd /tmp/depends
# XXX TODO more hard-coding
export PKG_PATH=/tmp/depends
if [ ! -z "${pkgs}" ]; then
arch=$(uname -m)
echo "adding dependencies"
for i in $pkgs; do
echo "pkg_add $i"
# strip either package suffix to get the registered name
base=$(basename $i .tgz)
base=$(basename $base .tbz)
if pkg_info -q -e $base; then
echo "skipping $base, already added"
else
if ! pkg_add $i; then
echo "error in dependency $i, exiting"
cleanup 0
fi
fi
done
fi
}
# Delete the listed packages in reverse-dependency order: on each pass
# remove only packages nothing depends on (+REQUIRED_BY empty) and
# retry the rest.  If a pass makes no progress the dependency list was
# incomplete; the build is aborted via cleanup 0.
del_pkg() {
pkgs=$*
cd /tmp/depends
# XXX TODO more hard-coding
export PKG_PATH=/tmp/depends
if [ ! -z "${pkgs}" ]; then
recursion=1
dellist=""
while [ $recursion -eq 1 ]; do
unset delpkg nextpkg
recursion=0
for i in $pkgs; do
base=$(basename $i .tgz)
base=$(basename $base .tbz)
if [ -s /var/db/pkg/${base}/+REQUIRED_BY ]; then
# still required by something; postpone to a later pass
recursion=1
nextpkg="${base} ${nextpkg}"
elif [ -d /var/db/pkg/${base}/ ]; then
delpkg="${base} ${delpkg}"
fi
done
pkgs="${nextpkg}"
# no change since last pass => stuck, bail out
if [ "$dellist" != "" -a "$dellist" = "$delpkg" ]; then
echo "deleted list =\""$dellist"\", packages to delete ="\"$delpkg\"
echo "The following packages were left behind (perhaps your dependency list is incomplete):"
ls /var/db/pkg
echo "error in pkg_delete, exiting"
cleanup 0
else
for j in ${delpkg}; do
echo "Deleting ${j}"
if ! (pkg_delete -f $j); then
echo "--> error in pkg_delete, exiting"
cleanup 0
fi
done
dellist=$delpkg
fi
done
fi
}
# Main body.  Arguments:
#   $1 port directory, $2 phase (1 = checksum, 2 = full build),
#   $3-$7 extract/patch/fetch/build/run dependency package lists.
# FIX: the two 'if ... then' bodies in the post-deinstall check below
# contained only commented-out commands, which is a syntax error in
# POSIX sh; ':' no-op placeholders have been added.
dir=$1
phase=$2
ED=$3
PD=$4
FD=$5
BD=$6
RD=$7
#export PATH=/ccache/libexec/ccache/:$PATH
#export CCACHE_PATH=/usr/bin:/usr/local/bin
# pick up value from environment set up in portbuild script
L=`echo ${LOCALBASE} | sed 's,^/,,'`
Z=`ident ${dir}/Makefile | grep 'FreeBSD:' | sed 's/^[ \t]*//'`
cd $dir || exit 1
restr=$(make -V RESTRICTED)
# Keep restricted distfiles in a subdirectory for extra protection
# against leakage
if [ ! -z "$restr" ]; then
# pick up value from environment set up in portbuild script
echo "DISTDIR=${DISTDIR}"
export DISTDIR=${DISTDIR}/RESTRICTED
echo "DISTDIR=${DISTDIR}"
mkdir -p ${DISTDIR}
fi
if [ $phase = 1 ]; then
# note: if you change this header, also change processonelog and processlogs2
cd $dir || exit 1
echo "building for: $(uname -mr)"
echo "maintained by: $(make maintainer)"
echo "port directory: ${dir}"
echo "Makefile ident: ${Z}"
echo "build started at $(date)"
echo "FETCH_DEPENDS=${FD}"
echo "PATCH_DEPENDS=${PD}"
echo "EXTRACT_DEPENDS=${ED}"
echo "BUILD_DEPENDS=${BD}"
echo "RUN_DEPENDS=${RD}"
echo "prefixes: LOCALBASE=${L}"
# Stash a copy of /etc/master.passwd and /etc/group to detect whether someone modifies it
cp /etc/master.passwd /etc/master.passwd-save
cp /etc/group /etc/group-save
# Files we do not care about changing between pre-build and post-cleanup
cat > /tmp/mtree.preexclude <<EOF
./root/*
./var/*
./tmp/*
./etc/make.conf.bak
./etc/make.conf
./work/*
./compat/linux/proc
./usr/share/man/cat*/*
./usr/local/etc/apache
./usr/local/news
./usr/local/share/xml
./usr/local/etc/gconf
./var/db/fontconfig
EOF
# Record a "pristine" mtree.
mtree -X /tmp/mtree.preexclude -xcn -k uid,gid,mode -p / > /tmp/mtree.pristine
add_pkg $FD
cd $dir || exit 1
pkgname=$(make package-name)
echo "================================================================"
echo "====================<phase 1: make checksum>===================="
# pick up value from environment set up in portbuild script
if /pnohang ${BUILD_TIMEOUT} /tmp/make.log1 ${pkgname} make checksum; then
cat /tmp/make.log1
echo "0" > /tmp/status
else
cleanup 1
fi
else
cd $dir || exit 1
pkgname=$(make package-name)
echo "================================================================"
echo "====================<phase 2: make extract>===================="
add_pkg ${ED}
cd $dir
/pnohang ${BUILD_TIMEOUT} /tmp/make.log2 ${pkgname} make extract || cleanup 2
cat /tmp/make.log2
del_pkg ${ED}
# Fetch depends still need to be here for 'make extract' since that target
# always reruns 'make fetch' due to the lack of fetch cookie (and no place
# to put it since WRKDIR isn't created by 'make fetch')
del_pkg $FD
echo "================================================================"
echo "====================<phase 3: make patch>===================="
add_pkg ${PD}
cd $dir
/pnohang ${BUILD_TIMEOUT} /tmp/make.log3 ${pkgname} make patch || cleanup 3
cat /tmp/make.log3
del_pkg ${PD}
echo "================================================================"
echo "====================<phase 4: make build>===================="
add_pkg ${BD}
# Files we do not care about changing between pre-build and post-cleanup
cat > /tmp/mtree.buildexclude <<EOF
./var/log/*
./tmp/*
./work/*
./compat/linux/proc
./root/*
./var/mail/*
./var/tmp/*
./usr/share/man/cat*/*
./usr/local/etc/apache
./usr/local/news
./usr/local/share/xml
./usr/local/etc/gconf
./var/db/fontconfig
EOF
# Record a "pristine" mtree.
mtree -X /tmp/mtree.buildexclude -xcn -k uid,gid,mode -p / > /tmp/mtree.prebuild
# Provide a virtual X display for ports that need one at build time
xvfb=0
if which -s Xvfb; then
xvfb=1
pid=$(echo $$ % 32768 | bc)
# XXX MCL HUH?
X11BASE=$(which Xvfb | sed -e 's./bin/Xvfb..')
Xvfb :${pid} -fp ${X11BASE}/lib/X11/fonts/misc &
# pick up value from environment set up in portbuild script
DISPLAY=${JAIL_ADDR}:${pid}
export DISPLAY
fi
cd $dir
/pnohang ${BUILD_TIMEOUT} /tmp/make.log4 ${pkgname} make build || cleanup 4
cat /tmp/make.log4
echo "================================================================"
echo "====================<phase 5: make test>===================="
cd $dir
/pnohang ${BUILD_TIMEOUT} /tmp/make.log5 ${pkgname} make -k regression-test
cat /tmp/make.log5
# The filesystem must be untouched before 'make install' so the
# pre/post-install comparison below is meaningful
mtree -X /tmp/mtree.buildexclude -x -f /tmp/mtree.prebuild -p / | egrep -v "^(${L}/var|${L}/lib/X11/xserver/SecurityPolicy|${L}/share/nls/POSIX|${L}/share/nls/en_US.US-ASCII|etc/services|compat |usr/X11R6 |etc/manpath.config|etc/.*.bak|${L}/info/dir|${L}/lib/X11/fonts/.*/fonts\.|usr/local/man/..( |/man. )|${L}/lib/X11/fonts/TrueType|${L}/etc/gconf/gconf.xml.defaults/%gconf-tree.*.xml|var/db/fontconfig/* )" > /tmp/list.preinstall
if [ -s /tmp/list.preinstall ]; then
echo "================================================================"
echo "Fatal error: filesystem was touched prior to 'make install' phase"
cat /tmp/list.preinstall
echo "================================================================"
cleanup 0
fi
echo "================================================================"
echo "====================<phase 6: make install>===================="
add_pkg ${RD}
cat > /tmp/mtree.exclude <<EOF
./root/*
./var/*
./tmp/*
./etc/make.conf.bak
./etc/make.conf
./work/*
./compat/linux/proc
EOF
mtree -X /tmp/mtree.exclude -xcn -k uid,gid,mode -p / > /tmp/mtree
cd $dir
if /pnohang ${BUILD_TIMEOUT} /tmp/make.log6 ${pkgname} make install; then
cat /tmp/make.log6
echo "0" > /tmp/status
else
cleanup 6
fi
echo "================================================================"
echo "====================<phase 7: make package>===================="
cd $dir
if /pnohang ${BUILD_TIMEOUT} /tmp/make.log7 ${pkgname} make package; then
cat /tmp/make.log7
echo "0" > /tmp/status
prefix=$(make -V PREFIX)
del_pkg ${pkgname}
else
cleanup 7
fi
mtree -X /tmp/mtree.exclude -x -f /tmp/mtree -p / | egrep -v "^(${L}/var|${L}/lib/X11/xserver/SecurityPolicy|${L}/share/nls/POSIX|${L}/share/nls/en_US.US-ASCII|etc/services|compat |usr/X11R6 |etc/manpath.config|etc/.*.bak|${L}/info/dir|${L}/lib/X11/fonts/.*/fonts\.|usr/local/man/..( |/man. )|${L}/lib/X11/fonts/TrueType|${L}/etc/gconf/gconf.xml.defaults/%gconf-tree.*.xml|var/db/fontconfig/* )" > /tmp/list3
# Compare the state of the filesystem now to before the 'make install' phase
dirty=0
if [ -s /tmp/list3 ]; then
cd /
grep ' extra$' /tmp/list3 | awk '{print $1}' | xargs -J % find % -ls > /tmp/list4
grep ' missing$' /tmp/list3 > /tmp/list5
grep -vE ' (extra|missing)$' /tmp/list3 > /tmp/list6
# pick up value from environment set up in portbuild script
if [ "x${NOPLISTCHECK}" = "x" ]; then
if grep -vq "$L/etc/" /tmp/list4; then
echo "1" > /tmp/status
dirty=1
fi
if [ -s /tmp/list5 -o -s /tmp/list6 ]; then
echo "1" > /tmp/status
dirty=1
fi
fi
echo "================================================================"
fi
echo
echo "=== Checking filesystem state"
if [ -s /tmp/list4 ]; then
echo "list of extra files and directories in / (not present before this port was installed but present after it was deinstalled)"
cat /tmp/list4
fi
if [ -s /tmp/list5 ]; then
echo "list of files present before this port was installed but missing after it was deinstalled)"
cat /tmp/list5
fi
if [ -s /tmp/list6 ]; then
echo "list of filesystem changes from before and after port installation and deinstallation"
cat /tmp/list6
fi
if [ "${dirty}" = 1 ]; then
cleanup 0
fi
# BUILD_DEPENDS and RUN_DEPENDS are both present at install-time (e.g. gmake)
# Concatenate and remove duplicates
BRD=$(echo $BD $RD | tr ' ' '\n' | sort -u | tr '\n' ' ')
del_pkg ${BRD}
cd /var/db/pkg
if [ $(echo $(echo * | wc -c)) != 2 ]; then
echo "leftover packages:" *
del_pkg *
echo "1" > /tmp/status
cleanup 0
fi
# Compare the state of the filesystem now to clean system (should again be clean)
mtree -X /tmp/mtree.preexclude -x -f /tmp/mtree.pristine -p / | egrep -v "^(${L}/var|${L}/lib/X11/xserver/SecurityPolicy|${L}/share/nls/POSIX|${L}/share/nls/en_US.US-ASCII|etc/services|compat |usr/X11R6 |etc/manpath.config|etc/.*.bak|${L}/info/dir|${L}/lib/X11/fonts/.*/fonts\.|usr/local/man/..( |/man. )|${L}/lib/X11/fonts/TrueType )" > /tmp/list3
echo
echo "=== Checking filesystem state after all packages deleted"
if [ -s /tmp/list3 ]; then
cd /
grep ' extra$' /tmp/list3 | awk '{print $1}' | xargs -J % find % -ls > /tmp/list4
grep ' missing$' /tmp/list3 > /tmp/list5
grep -vE ' (extra|missing)$' /tmp/list3 > /tmp/list6
if [ "x${NOPLISTCHECK}" = "x" ]; then
# These two checks are deliberately disabled (report-only); the
# ':' no-ops keep the then-bodies syntactically valid.
if grep -vq "$L/etc/" /tmp/list4; then
:
#echo "1" > /tmp/status
fi
if [ -s /tmp/list5 ]; then
:
#echo "1" > /tmp/status
fi
fi
echo "================================================================"
if [ -s /tmp/list4 ]; then
echo "list of extra files and directories in / (not present on clean system but present after everything was deinstalled)"
cat /tmp/list4
touch /.dirty
fi
if [ -s /tmp/list5 ]; then
echo "list of files present on clean system but missing after everything was deinstalled)"
cat /tmp/list5
touch /.dirty
fi
if [ -s /tmp/list6 ]; then
echo "list of filesystem changes from before and after all port installation/deinstallation"
cat /tmp/list6
touch /.dirty
fi
fi
cmp /etc/group /etc/group-save || (echo "=== /etc/group was modified:"; diff -du /etc/group-save /etc/group)
cmp /etc/master.passwd /etc/master.passwd-save || (echo "=== /etc/master.passwd was modified:"; diff -du /etc/master.passwd-save /etc/master.passwd)
if [ ${xvfb} = 1 ]; then
kill $(jobid %1)
fi
# XXX Don't keep distfiles if checksum mismatches
cd ${dir}
keep_distfiles=$(make -V ALWAYS_KEEP_DISTFILES)
distdir=$(make -V DISTDIR)
if [ -z "${keep_distfiles}" -a ! -z "${distdir}" ]; then
rm -rf ${distdir}/*
fi
if [ -e ${dir}/.keep ]; then
cd ${dir}
objdir=$(make -V WRKDIR)
tar cfjC /tmp/work.tbz ${objdir}/.. work
fi
echo "================================================================"
echo -n "build of ${dir} ended at "
date
fi
exit 0

View File

@ -1,48 +0,0 @@
#!/bin/sh
#
# buildsuccess <arch> <branch> <buildid> <pkgname>
#
# Record a successful build: if the port was previously failing, mail
# the maintainer list, drop its entry from the 'failure' database and
# remove the stale error-log symlink.
# configurable variables
pb=/var/portbuild
usage () {
echo "usage: buildsuccess arch branch buildid pkgname"
exit 1
}
if [ $# -ne 4 ]; then
usage
fi
arch=$1
branch=$2
buildid=$3
pkgname=$4
shift 4
builddir=${pb}/${arch}/${branch}/builds/${buildid}
. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv
buildenv ${pb} ${arch} ${branch} ${builddir}
# Don't pick up installed packages from the host
export LOCALBASE=/nonexistentlocal
index=${PORTSDIR}/${INDEXFILE}
# map the package name back to its port directory via the INDEX
portloc=$(grep "^$pkgname|" ${index} | cut -f 2 -d \| | sed s,/usr/ports/,,)
cd ${pb}/${arch}/${branch}
if grep -q "^${portloc}|" failure; then
echo | mail -s "${pkgname} now builds on ${arch} ${branch}" ${mailto}
grep -v "^${portloc}|" failure > failure.new
mv failure.new failure
fi
if [ -L ${pb}/${arch}/${branch}/latest/${portloc} ]; then
rm -f ${pb}/${arch}/${branch}/latest/${portloc}
fi

View File

@ -1,35 +0,0 @@
#!/bin/sh
#
# Runs in the background on the server. This script keeps track of
# the relative loads of the client machines, and specifies which machine(s)
# should be handed new jobs, according to the following algorithm:
#
# For each machine listed in ${buildroot}/mlist, check whether its load
# information has been updated in the last 15 seconds (should be updated by
# the reportload script every 5 seconds). If so, then divide the number of
# running jobs on the client by its weighting in mlist, and output the
# machine(s) with the minimum value to ${buildroot}/ulist.
#
# Dividing by the weight has the effect of grouping machines with similar
# job load (e.g. a weight of 5 will rank machines with job loads 0, 1, 2, 3, 4
# as the same; if the machines all had a weight of 1 then it would only
# choose the machine with the least value of the job load, and would probably
# choose a single machine most of the time).
buildroot=/var/portbuild
# architectures to monitor, from the command line
arches=$*
# Poll every machine of every architecture, then sleep two minutes.
while true; do
for i in ${arches}; do
mlist=${buildroot}/${i}/mlist
set $(cat $mlist)
while [ $# -gt 0 ]; do
m=$1
${buildroot}/scripts/pollmachine ${i} ${m}
shift 1
done
done
sleep 120
done

View File

@ -1,10 +0,0 @@
#!/bin/sh
#
# rc hook: launch the checkmachines monitor in the background at
# system startup, reporting it on the boot line.
script=/var/portbuild/scripts/checkmachines
if [ -x ${script} ]; then
	${script} &
	echo -n ' checkmachines'
fi

View File

@ -1,55 +0,0 @@
#!/usr/bin/env python
# Rewrite an INDEX file, dropping entries for packages missing from a
# package directory and pruning dependencies on those packages.
# NOTE: Python 2 code (file(), print statements).

import os, sys

if len(sys.argv) != 3:
    print "%s: <index> <pkgdir>" % sys.argv[0]
    sys.exit()

indexfile = sys.argv[1]
pkgdir = sys.argv[2]
if not pkgdir.endswith("/All"):
    pkgdir = pkgdir + "/All"

# Package names actually present on disk (without the .tbz suffix).
packages = [pkg for (pkg, ext) in map(os.path.splitext, os.listdir(pkgdir)) if ext == ".tbz"]

index=[]
pkgs=[]
for i in file(indexfile):
    out = i.rstrip().split("|")
    out[7] = out[7].split(" ") # build dep
    out[8] = out[8].split(" ") # run dep
    index.append(out)
    # Keep track of all the packages we have seen in the index. In
    # principle there is no need to track the build/run deps since
    # they will also be listed in field 0. We could add a sanity
    # check for this.
    pkgs.append(out[0])
    pkgs.extend(out[7])
    pkgs.extend(out[8])

used=set(pkgs)
notfound=used.difference(set(packages))

# Write out the new index, stripping out the entries for missing
# packages as well as dependencies from existing packages on the
# missing ones.
#
# This is slightly dubious since it will intentionally list packages
# that are present but missing dependencies on non-redistributable
# things like jdk that were successfully built but removed already, so
# the dependency lists will not be complete. It matches the old
# chopindex.sh behaviour though.
#
# I think it would be better to just prune those incomplete packages
# from the INDEX altogether, but I don't know if anyone is relying on
# this historical behaviour.
for data in index:
    if data[0] not in notfound:
        print "%s|%s|%s|%s" % ("|".join(data[:7]),
                               " ".join([j for j in data[7] if j not in notfound]),
                               " ".join([j for j in data[8] if j not in notfound]),
                               "|".join(data[9:]))

View File

@ -1,164 +0,0 @@
#!/bin/sh
# client-side script to claim a chroot
# usage: claim-chroot ${arch} ${branch} ${pkgname} ${buildid}
# Care needs to be taken with the output of this script, it cannot
# output anything except space-separated pairs of "keyword value".
#
# Keywords:
# chroot : successfully claimed a chroot
# setup : we own the rights to setup the build env
# wait : someone else is setting up the build env
# In case of other error, just exit.
# XXX if the setupnode process was a single process invocation we
# could use a lockf lock, and be able to tell if the setup process was
# still running or died prematurely
pb=/var/portbuild
usage () {
echo "usage: claim-chroot arch branch buildid"
exit 1
}
if [ $# -ne 4 ]; then
usage
fi
arch=$1
branch=$2
buildid=$3
pkgname=$4
shift 4
# If client has just rebooted it may not have any files yet
if [ ! -f /tmp/.boot_finished ]; then
echo "wait boot"
exit 1
fi
# Do we need to set up the client after cold boot?
#
# NB: mkdir is being used as an atomic test-and-set operation to
# provide mutual exclusion against other callers, since we only want
# one of them to perform setup
builddir=${pb}/${arch}/${branch}/builds/${buildid}
# Is the build environment populated? Again we only want a single
# instance to gain setup rights if not.
if (mkdir /tmp/.setup-${buildid} 2> /dev/null); then
# The buildenv is not set up, tell the caller to do it
echo "setup ${builddir}"
exit 1
fi
if [ ! -f ${builddir}/.ready ]; then
# The buildenv is still being set up
echo "wait ${builddir}"
exit 1
fi
. ${pb}/${arch}/client.conf
. ${pb}/${arch}/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/${arch}/portbuild.$(hostname)
buildroot=${scratchdir}
pkgname=${pkgname%.${PKGSUFFIX}}
chrootdir=${buildroot}/${branch}/${buildid}/chroot
# Perform initial sanity checks
# Check squid is running
if [ ! -z "${squid_dir}" ]; then
/usr/local/sbin/squid -k check 2> /dev/null
status=$?
if [ "${status}" != "0" ]; then
# leave a marker so the problem is visible, and restart squid
touch ${scratchdir}/.squid
/usr/local/etc/rc.d/squid start > /dev/null &
echo "error squid"
exit 1
else
rm -f ${scratchdir}/.squid
fi
fi
# Check for enough disk space (100MB free on the scratch partition)
df=$(df -k ${scratchdir} | tail -1 | awk '{print $4}')
if [ ${df} -lt 102400 ]; then
touch ${scratchdir}/.disk
echo "error disk"
exit 1
else
rm -f ${scratchdir}/.disk
fi
found=0
# Look for pre-existing chroot directories that are populated and unused
for dir in ${chrootdir}/*; do
if [ -f ${dir}/.ready -o -f ${dir}/.dirty ]; then
# Atomically claim the directory
mkdir ${dir}/used 2>/dev/null || continue
touch ${dir}/used/${pkgname}
if [ -f ${dir}/.dirty ]; then
# dirty chroot: hand it off for cleaning and keep looking
${pb}/scripts/clean-chroot ${arch} ${branch} ${buildid} ${dir} 2 >/dev/null 2>/dev/null &
continue
fi
found=1
chroot=${dir}
break
fi
done
chrootnum=$$
# If we didn't find a pre-existing directory, create and claim a new one.
while [ ${found} != 1 ]; do
if [ "${use_zfs}" = "1" ]; then
chroot=${chrootdir}/${chrootnum}
# XXX deal with failure
zfs clone ${scratchdir#/}/${branch}/${buildid}/world@base ${chroot#/}
mkdir ${chroot}/used
elif [ "${use_md_swap}" = "1" ]; then
unit=$(mdconfig -a -t swap -s ${md_size})
if [ -z "${unit}" ]; then
echo "error mdconfig"
exit 1
fi
newfs /dev/${unit} > /dev/null
chrootnum=$(echo ${unit} | sed 's,md,,')
chroot=${chrootdir}/${chrootnum}
mkdir -p ${chroot}/used 2>/dev/null || continue
# Need to make sure that used/ is also present after mounting
# the fresh md so as to not leave open any races
mount -o async /dev/${unit} ${chroot}/used
mkdir ${chroot}/used/used
touch ${chroot}/used/used/${pkgname}
umount -f ${chroot}/used
mount -o async /dev/${unit} ${chroot}/
touch ${chroot}/.notready
else
chrootnum=$(($chrootnum+1))
chroot=${chrootdir}/${chrootnum}
mkdir -p ${chrootdir} 2> /dev/null || continue
mkdir ${chroot} 2>/dev/null || continue
mkdir ${chroot}/used 2>/dev/null || continue
touch ${chroot}/.notready
fi
if [ "${use_tmpfs}" = "1" ]; then
mount -t tmpfs -o "size=${tmpfs_size}" foo ${chroot}
mkdir ${chroot}/used 2>/dev/null || echo "ERROR: mkdir race"
touch ${chroot}/.notready
fi
touch ${chroot}/used/${pkgname}
found=1
done
echo "chroot ${chroot}"

View File

@ -1,124 +0,0 @@
#!/bin/sh
# $FreeBSD$
# client-side script to clean up a chroot
# Repeatedly kill every process holding files open under ${1}${2}
# until fstat reports none remaining.
kill_procs()
{
	dir=$1
	mount=$2
	pids="XXX"
	while [ -n "${pids}" ]; do
		pids=$(fstat -f "${dir}${mount}" | tail +2 | awk '{print $3}' | sort -u)
		if [ -n "${pids}" ]; then
			echo "Killing off pids in ${dir}"
			ps -p $pids
			kill -KILL ${pids} 2> /dev/null
			sleep 2
		fi
	done
}
# Force-unmount ${1}${2} if it is still mounted, killing any processes
# that keep it busy first.
cleanup_mount() {
	chroot=$1
	mount=$2
	if [ ! -d ${chroot}${mount} ]; then
		return
	fi
	mdir=$(fstat -f ${chroot}${mount} | head -2 | tail -1 | awk '{print $5}')
	if [ "${mdir}" = "MOUNT" ]; then
		umount -f ${chroot}${mount} || echo "Cleanup of ${chroot}${mount} on $(hostname) failed!"
	fi
	if [ "${mdir}" = "${chroot}${mount}" ]; then
		kill_procs ${chroot} ${mount}
		umount -f ${chroot}${mount} || echo "Cleanup of ${chroot}${mount} on $(hostname) failed!"
	fi
}
# Main body: tear down one build chroot.
#   $1 arch, $2 branch, $3 buildid, $4 chroot path,
#   $5 clean level (1 = scrub for reuse, 2 = destroy entirely).
arch=$1
branch=$2
buildid=$3
chroot=$4
clean=$5
pb=/var/portbuild
. ${pb}/${arch}/client.conf
. ${pb}/${arch}/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/${arch}/portbuild.$(hostname)
# directories to clean
cleandirs="${LOCALBASE} /compat /var/db/pkg"
if [ ! -d "${chroot}" ]; then
exit 0
fi
if [ `realpath ${chroot}` = "/" ]; then
# Don't spam the root file system if something has gone wrong!
exit 1
fi
# kill anything still running inside the jail, if one was recorded
if [ -f ${chroot}/tmp/jail.id ]; then
pgrep -lfj `awk '{print $1}' ${chroot}/tmp/jail.id`
pkill -j `awk '{print $1}' ${chroot}/tmp/jail.id`
fi
#umount ${chroot}/proc
for i in ${ARCHS_REQUIRING_LINPROCFS}; do
if [ ${i} = ${arch} ]; then
cleanup_mount ${chroot} /compat/linux/proc
break
fi
done
for i in /a/ports /usr/src /dev /root/.ccache; do
cleanup_mount ${chroot} ${i}
done
# The teardown strategy depends on how the chroot is backed:
# ZFS clone, tmpfs, swap-backed md, or a plain directory.
if [ "${use_zfs}" = "1" ]; then
cleanup_mount ${chroot} ""
zfs destroy -f ${chroot#/}
elif [ "${use_tmpfs}" = "1" -a "${clean}" = "2" ]; then
cleanup_mount ${chroot} ""
if ! rm -rf ${chroot} >/dev/null 2>&1; then
chflags -R noschg ${chroot} >/dev/null 2>&1
rm -rf ${chroot} >/dev/null 2>&1
fi
# XXX possible race from cleanup and claim by next build?
elif [ "${use_md_swap}" = "1" -a \( "${md_persistent}" != "1" -a "${clean}" -gt "0" \) -o "${clean}" = "2" ]; then
cleanup_mount ${chroot} /used > /dev/null 2>&1
cleanup_mount ${chroot} ""
mdconfig -d -u $(basename ${chroot})
if ! rm -rf ${chroot} >/dev/null 2>&1; then
chflags -R noschg ${chroot} >/dev/null 2>&1
rm -rf ${chroot} >/dev/null 2>&1
fi
else
if [ "${clean}" = 1 ]; then
# scrub installed files but keep the chroot for reuse
rm -rf ${chroot}/tmp/*
for dir in ${cleandirs}; do
if ! rm -rf ${chroot}${dir} >/dev/null 2>&1; then
chflags -R noschg ${chroot}${dir} >/dev/null 2>&1
rm -rf ${chroot}${dir} >/dev/null 2>&1
fi
done
test -x ${chroot}/sbin/ldconfig && chroot ${chroot} /sbin/ldconfig -R
for i in ${ARCHS_REQUIRING_AOUT_COMPAT}; do
if [ ${i} = ${arch} ]; then
test -x ${chroot}/sbin/ldconfig && chroot ${chroot} /sbin/ldconfig -aout -R
break
fi
done
rm -rf ${chroot}/var/db/pkg/*
# releases the claim taken by claim-chroot
rm -rf ${chroot}/used
elif [ "${clean}" = 2 ]; then
if ! rm -rf ${chroot} >/dev/null 2>&1; then
chflags -R noschg ${chroot} >/dev/null 2>&1
rm -rf ${chroot} >/dev/null 2>&1
fi
fi
fi

View File

@ -1,100 +0,0 @@
#!/bin/sh
# $FreeBSD$
# To be run on the client, this script looks for chroot directories
# that have not been used in 60 minutes, as well as directories listed
# as 'in use' that have not been touched in 5 days (corresponding to
# port builds that have timed out or shut down uncleanly) and prunes
# them to reclaim space.
pb=/var/portbuild
# kill_procs dir: repeatedly kill every process holding files open on
# the filesystem containing dir until none remain.
kill_procs()
{
dir=$1
pids="XXX"
while [ ! -z "${pids}" ]; do
pids=$(fstat -f "$dir" | tail +2 | awk '{print $3}' | sort -u)
if [ ! -z "${pids}" ]; then
echo "Killing off pids in ${dir} on $(hostname)"
ps -p $pids
kill -KILL ${pids} 2> /dev/null
sleep 2
fi
done
}
# cleanup_mount chroot mount: unmount chroot/mount, first killing any
# processes that keep it busy.
cleanup_mount() {
chroot=$1
mount=$2
if [ -d ${chroot}${mount} ]; then
mdir=$(fstat -f ${chroot}${mount} | head -2 | tail -1 | awk '{print $5}')
# "MOUNT" (the fstat header) means nothing has files open there.
if [ "${mdir}" = "MOUNT" ]; then
umount ${chroot}${mount} || echo "Cleanup of ${chroot}${mount} on $(hostname) failed!"
fi
if [ "${mdir}" = "${chroot}${mount}" ]; then
kill_procs ${chroot}${mount}
umount ${chroot}${mount} || echo "Cleanup of ${chroot}${mount} on $(hostname) failed!"
fi
fi
}
# note: uname is not being overridden (should not need client.conf here)
arch=$(uname -m)
. ${pb}/${arch}/client.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/${arch}/portbuild.$(hostname)
# Chroot layout differs between ZFS and non-ZFS scratch areas.
if [ "${use_zfs}" = "1" ]; then
old=$(find ${scratchdir}/*/*/* -prune -mmin +60 2> /dev/null)
else
old=$(find ${scratchdir}/*/*/chroot/* -prune -mmin +60 2> /dev/null)
fi
if [ -z "${old}" ]; then
exit 0
fi
# Prune out chroots with active builds
for i in ${old}; do
if [ ! -d ${i}/used ]; then
old2="${i} ${old2}"
# Also remove "in use" chroots that were set up more than 5 days ago
elif [ ! -z "`find $i/used -prune -mmin +7200`" ]; then
echo "cleanup-chroots: Found old files on `hostname`:"
ls -l ${i}/tmp ${i}/used
echo "${i} allegedly in use but >5 days old"
old2="${i} ${old2}"
fi
done
if [ -z "${old2}" ]; then
exit 0
fi
# cleanup old NFS and devfs mounts
for i in ${old2}; do
mounts=$(mount | grep $i | awk '{print $3}')
if [ ! -z "${mounts}" ]; then
for j in ${mounts}; do
umount ${j} || cleanup_mount ${j}
done
umount ${i}/compat/linux/proc || cleanup_mount ${i}/compat/linux/proc
fi
if [ "${use_zfs}" != "1" -a "${use_md_swap}" = "1" ]; then
chrootnum=$(basename $i)
# Fix: detach the md device by unit number; the previous code used
# "/dev/md${i}" (the full chroot path), which can never name a device.
umount -f /dev/md${chrootnum}
mdconfig -d -u ${chrootnum}
fi
done
# Move condemned chroots aside and remove them, retrying with schg
# flags cleared if the first removal fails.
mkdir -p ${scratchdir}/old
mv ${old2} ${scratchdir}/old
rm -rf ${scratchdir}/old 2> /dev/null
if [ -d ${scratchdir}/old ]; then
chflags -R noschg ${scratchdir}/old
rm -rf ${scratchdir}/old
fi

View File

@ -1,37 +0,0 @@
#!/bin/sh
# $FreeBSD$
# Client script to collect metrics for ganglia:
# - current vnodes
# - max vnodes
# - number of packages built in the past hour
pb=/var/portbuild
arch=$(uname -m)
me=$(hostname)
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
# The vnode gauges need no configuration, so report them unconditionally.
gmetric --name="maxvnodes" --value=$(sysctl -n kern.maxvnodes) --tmax=120 --dmax=0 --type=uint32 --units="# vnodes"
gmetric --name="vnodes" --value=$(sysctl -n vfs.numvnodes) --tmax=120 --dmax=0 --type=uint32 --units="# vnodes"
# The package counter requires the per-arch configuration files.
if [ ! -f ${pb}/${arch}/portbuild.conf -o ! -f ${pb}/${arch}/portbuild.${me} ]; then
exit 1
fi
. ${pb}/${arch}/client.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/${arch}/portbuild.${me}
[ -d ${scratchdir}/stamp ] || exit 1
cd ${scratchdir}/stamp || exit 1
# Count stamp files younger than an hour, then expire the older ones.
pkgs=$(find . -mmin -60 | wc -l)
pkgs=$((${pkgs} + 0))
find . \! -mmin -60 -delete
gmetric --name="packages" --value="${pkgs}" --tmax=120 --dmax=0 --type=int16 --units="Packages/hour" --conf="/usr/local/etc/gmond.conf"

View File

@ -1,56 +0,0 @@
#!/bin/sh
# prints out logs that are in dir1 but not in dir2
# XXX out of date and disabled
if [ $# -ne 3 ]; then
echo "usage: $0 arch dir1 dir2"
exit 1
fi
here=$(pwd)
arch=$1
dir1=$2
dir2=$3
fdir1=$here/${arch}-$dir1
fdir2=$here/${arch}-$dir2
of=$here/$arch-$dir1-$dir2.html
# Emit the static HTML header.
echo "<html><head><title>Logs that are in $dir1 but not in $dir2</title>" >$of
echo "<h1>Logs that are in $dir1 but not in $dir2</h1>" >>$of
echo "</head><body>" >>$of
cd $fdir1
# Normalize compressed names so plain and .bz2 logs compare equal.
logs=$(find . -name \*.log -o -name \*.log.bz2 | sed -e 's/\.log\.bz2/\.log/g')
nlogs=$(echo $logs | wc -w)
if [ $nlogs -eq 0 ]; then
echo "No errors" >>$of;
else
num=0
echo "<table border=1>" >>$of
echo "<tr><th>Log</th></tr>" >>$of
for i in $logs; do
# Fix: the original tested the positive case with a "then" branch that
# contained only a comment, which is a syntax error in sh (each branch
# must contain at least one command). Test the negation instead.
if [ ! -f ${fdir2}/${i}.bz2 -a ! -f ${fdir2}/${i} ]; then
fname1=$(basename $i .bz2)
fname=$(basename $fname1 .log)
echo -n "<tr><td>" >>$of
echo -n "<a href=\"$arch-$dir1/index.html#$fname\">" >>$of
echo -n $fname >>$of
echo -n "</a>" >>$of
echo "</td></tr>" >>$of
num=$(($num + 1))
fi
done
echo "</table><br>" >> $of
echo "$num errors<br>" >> $of
fi
echo "<hr>" >> $of
echo "<a href=\"../\">back to top</a>" >> $of
echo "</body></html>" >>$of

View File

@ -1,54 +0,0 @@
#!/bin/sh
# Upload a build's collected distfiles to the distfile server.  With no
# flag this is an rsync dry run; -yesreally performs the transfer and
# -cleanup removes the local distfiles afterwards.
if [ $# -lt 3 ]; then
echo "usage: $0 arch branch buildid"
exit 1
fi
# configurable variables
pb=/var/portbuild
. ${pb}/conf/server.conf
arch=$1
branch=$2
buildid=$3
builddir=${pb}/${arch}/${branch}/builds/${buildid}
distdir=${builddir}/distfiles/
log=${builddir}/logs/.distfiles
# Default to a dry run unless explicitly confirmed.
yesreally=0
dryrun=-n
cleanup=0
case "$4" in
-yesreally)
yesreally=1
dryrun=
;;
-cleanup)
cleanup=1
;;
esac
if [ "${cleanup}" -eq 1 ]; then
echo "Removing distfiles"
rm -rf ${distdir} || exit 1
exit 0
fi
# Refuse to upload a directory that processdistfiles has not flattened.
if [ -e ${distdir}/.pbtmp ]; then
echo "${distdir} has not been processed!"
exit 1
fi
rsync ${dryrun} -r -v -l -t \
--exclude RESTRICTED/ \
${distdir} \
${UPLOAD_USER}@${UPLOAD_TARGET}:${UPLOAD_DIRECTORY} | \
tee ${log}
num=$(wc -l ${log} | awk '{print $1}')
if [ "$yesreally" = "0" ]; then
echo "--> Will transfer ${num} files - make sure this is what you want and rerun '$0 $* -yesreally'"
else
echo "--> Transferred ${num} files - results in ${log}"
echo "    Now run '$0 $1 $2 $3 -cleanup' to remove distfiles and save space"
fi

View File

@ -1,34 +0,0 @@
#!/bin/sh
# Copy a branch's packages to the FTP site; stdout doubles as the body
# of the notification mail.
# configurable variables
pb=/var/portbuild
. ${pb}/portbuild.conf
lock=${pb}/cppackages.lock
unset DISPLAY
# Emit mail headers first - the caller pipes this output to a mailer.
echo "Subject: package copying logs"
echo
echo "Called with arguments: "${1+"$@"}
echo "Started at $(date)"
if [ $# != 1 ]; then
echo "usage: $0 branch"
exit 1
fi
# NOTE(review): check-then-touch locking is racy (TOCTOU) and the lock
# is not removed on abnormal exit; lockf(1) would be more robust.
if [ -e ${lock} ]; then
echo "Skipped since lock file exists"
exit 1
fi
touch ${lock}
cd $pb
# Stream the package tree to the FTP site; keep only the last 100 lines
# of transfer chatter so the mail stays short.
tar -cf - $1/packages | ssh $ftpsite -l $user tar -C $packagedir -xvf - 2>&1 | tail -100
echo "Ended at $(date)"
rm -f ${lock}

View File

@ -1,11 +0,0 @@
#!/bin/sh
#
# clean up stale chroots on all client machines.  MCL 20081216.
#
# original version from root crontab:
#
#/var/portbuild/scripts/allgohans all -q /var/portbuild/scripts/cleanup-chroots
#
# new code:
#
# Run cleanup-chroots on every node of every arch via the .safe wrapper.
/var/portbuild/scripts/allgohans.safe all -q /var/portbuild/scripts/cleanup-chroots

View File

@ -1,7 +0,0 @@
#!/bin/sh
# Queue a cppackages run for the given branch via at(1), then show the
# queue so the operator can confirm it was scheduled.
[ $# -eq 1 ] || { echo "usage: $0 branch"; exit 1; }
echo /var/portbuild/scripts/cppackages $1 | at + 1 minute
atq

View File

@ -1,76 +0,0 @@
#!/bin/sh
#
# Process a distfiles/ directory, efficiently moving files from
# .pbtmp/<package>/* into . (relative to distfiles/)
#
# We do this in several stages
#
# 1. Remove incomplete downloads where .pbtmp/<package>/.done does not
# exist (this is an incomplete transfer and may be corrupted)
#
# 2. Populate the directory hierarchy from .pbtmp/*/ into .
#
# 3. For each subdirectory under .pbtmp/*/, group them together by
# subdirectory name and move all of the files at once.
# e.g. .pbtmp/foo-1.0/dir/* and .pbtmp/bar-2.0/dir/* are
# processed at the same time (contents moved to ./dir/).
#
# 4. Once we have handled the subdirectories, everything left
# is a plain file in .pbtmp/*/ so we move those in bulk together
# into .
#
# 5. Clean up
if [ $# -ne 3 ]; then
echo "usage: $0 <arch> <branch> <buildid>"
exit 1
fi
arch=$1
branch=$2
buildid=$3
pb=/var/portbuild
builddir=${pb}/${arch}/${branch}/builds/${buildid}
distdir=${builddir}/distfiles
cd ${distdir} || exit 1
echo "Removing incomplete downloads"
# XXX MCL put an existance test here
# .done: package dirs whose transfer completed; .alldirs: every dir
# under .pbtmp/ with a trailing slash appended for uniform matching.
find ${distdir}/.pbtmp/ -name .done -depth 2 | sed -e 's,/.done$,/,' | sort > .done || exit 1
find -d ${distdir}/.pbtmp/ -type d -mindepth 1 |sed -E -e 's,([^/])$,\1/,' > .alldirs || exit 1
sed -E -e "s,^(${distdir}/.pbtmp/[^/]+/).*,\1," < .alldirs | sort -u > .pkgdirs
# Package dirs without a .done marker are incomplete; remove them.
comm -1 -3 .done .pkgdirs | xargs rm -rf
# Full path of subdirectories under package dirs
grep -E "^${distdir}/.pbtmp/[^/]+/.+/" .alldirs > .pkgsubdirs
# All subdirectories under package dirs
sed -E -e "s,^${distdir}/.pbtmp/[^/]+/,," < .pkgsubdirs | grep -v '^$' | sort -u > .subdirs
echo "Making directories"
cat .subdirs | xargs mkdir -p
# Move files in each subdir
for i in `cat .pkgsubdirs`; do
find ${i} -type f -depth 1 \! -name .done | xargs -J % mv % ${distdir}/${i#${distdir}/.pbtmp/*/}
# rmdir ${i} || exit 1
# rm -rf ${distdir}/.pbtmp/$i
done
cat .pkgsubdirs | xargs rmdir || exit 1
echo "Moving remaining distfiles"
# XXX MCL put an existance test here
find ${distdir}/.pbtmp/ -type f -depth 2 \! -name .done | xargs -J % mv % ${distdir}
echo "Cleaning up"
# Remove the .done markers, the now-empty package dirs, and scratch files.
sed -e 's,$,.done,' < .pkgdirs | xargs rm -f || exit 1
cat .pkgdirs | xargs rmdir || exit 1
rmdir .pbtmp || exit 1
rm -f .alldirs .done .pkgdirs .pkgsubdirs .subdirs || exit 1

View File

@ -1,28 +0,0 @@
#!/bin/sh
# $FreeBSD$
# Regenerate error/log summary pages for every branch of one arch.
arch=$1
versions="latest full"
pb=/var/portbuild
. ${pb}/conf/server.conf
. ${pb}/${arch}/portbuild.conf
home=${pb}/errorlogs
scripts=${pb}/scripts
# Branch directories are named after their major version number.
branches=$(cd ${pb}/${arch} && ls -d [1-9]* 2> /dev/null)
for version in ${versions}; do
for branch in ${branches}; do
d=$home/$arch-$branch-$version
[ -d $d ] && cd $d && ${scripts}/processlogs ${arch}
d=${pb}/${arch}/${branch}
[ -d $d ] && cd $d && ${scripts}/processfail ${arch} ${branch}
done
for branch in ${branches}; do
d=$home/$arch-$branch-$version-logs
[ -d $d ] && cd $d && ${scripts}/processlogs2
done
done

View File

@ -1,770 +0,0 @@
#!/bin/sh
# $FreeBSD$
# main server-side script to run a package build
# configurable variables
pb=/var/portbuild
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:${pb}/scripts
# writable by portmgr
umask 002
# Name of the per-build package build journal file.
journalname="journal"
# Print the full option summary and exit non-zero.
usage () {
echo "usage: arch branch buildid datestamp [-incremental] [-continue] [-restart] [-nofinish] [-finish] [-nocleanup] [-keep] [-nobuild] [-noindex] [-noduds] [-norestr] [-nochecksubdirs] [-nosrc] [-srccvs] [-noports] [-portscvs] [-noplistcheck] [-nodistfiles] [-fetch-original] [-cdrom] [-trybroken]"
# XXX MCL I think it's going to be too hard to move the create in here, now.
echo " -incremental : Start a new incremental build"
echo " -continue : Restart an interrupted build, skipping failed ports"
echo " -restart : Restart an interrupted build, rebuilding failed ports"
echo " -nofinish : Do not post-process upon build completion"
echo " -finish : Post-process a completed build"
echo " -nocleanup : Do not clean up and deactivate the build once it finishes"
echo " -keep : Do not automatically recycle this build"
echo " -nobuild : Only do the build preparation steps, do not build packages"
echo " -noindex : Do not build the INDEX"
echo " -noduds : Do not build the duds file"
echo " -nochecksubdirs : Do not check the SUBDIRS"
echo " -norestr : Do not build the restricted.sh file"
echo " -nosrc : Do not update the src tree"
echo " -srccvs : Update the src tree via CVS, don't use a pre-existing snapshot"
echo " -noports : Do not update the ports tree"
echo " -portscvs : Update the ports tree via CVS, don't use a pre-existing snapshot"
echo " -noplistcheck : Don't check the plist during the build"
echo " -nodistfiles : Don't collect distfiles"
echo " -fetch-original : Fetch from original MASTER_SITE"
echo " -cdrom : Prepare a build for distribution on CDROM "
echo " -trybroken : Try to build BROKEN ports"
exit 1
}
if [ $# -lt 4 ]; then
usage
fi
arch=$1
branch=$2
buildid=$3
datestamp=$4
shift 4
. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/scripts/buildenv
validate_env ${arch} ${branch} || usage
# XXX MCL too early to do this here.
# Fix: remember what the user asked for.  resolve() overwrites buildid
# and yields the empty string on failure, so the error message below
# used to print an empty build ID.
origbuildid=${buildid}
buildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
if [ -z "${buildid}" ]; then
echo "Invalid build ID ${origbuildid}"
exit 1
fi
if [ -f ${pb}/${arch}/portbuild.conf ]; then
. ${pb}/conf/server.conf
. ${pb}/${arch}/portbuild.conf
else
usage
fi
pbab=${pb}/${arch}/${branch}
# Exit on common fatal signals so the caller's lock is released.
# NOTE(review): SIGKILL (9) cannot actually be trapped.
trap "exit 1" 1 2 3 9 10 11 15
# Mail a completion notice to ${mailto} and terminate with the given status.
mailexit () {
rc=$1
echo | mail -s "$(basename $0) ended for ${arch}-${branch} ${buildid} at $(date)" ${mailto}
exit ${rc}
}
# Roll a fresh src tree tarball (plus MD5) for distribution to the
# build clients, replacing any tarball left over from an earlier build.
srctar() {
tarball=${builddir}/src-${buildid}.tbz
rm -f ${builddir}/src-2*.tbz*
tar cfCj ${tarball} ${builddir} src/ 2>/dev/null
md5 ${tarball} > ${tarball}.md5
}
# Roll a fresh ports tree tarball (plus MD5) for distribution to the
# build clients, replacing any tarball left over from an earlier build.
portstar() {
tarball=${builddir}/ports-${buildid}.tbz
rm -f ${builddir}/ports-2*.tbz*
tar cfCj ${tarball} ${builddir} ports/ 2>/dev/null
md5 ${tarball} > ${tarball}.md5
}
# usage: makeindex pb arch branch builddir
# note: can take ~24 minutes!
# NOTE(review): the usage line above is stale - the function actually
# takes (pb arch branch buildid builddir).
makeindex () {
pb=$1
arch=$2
branch=$3
buildid=$4
builddir=$5
cd ${builddir}/ports
echo "================================================"
echo "generating index"
echo "================================================"
echo "index generation started at $(date)"
${pb}/scripts/makeindex ${arch} ${branch} ${buildid} || return 1
echo "index generation ended at $(date)"
echo $(wc -l ${INDEXFILE} | awk '{print $1}') "lines in INDEX"
# Save a copy of it for the next build since ports directories may
# not be preserved
cp ${INDEXFILE} ${builddir}/bak
}
# usage: checkindex builddir
# Perform some sanity checks on the INDEX so we don't blow up later on
checkindex () {
builddir=$1
cd ${builddir}/ports
if [ ! -f ${INDEXFILE} ]; then
# Fix: corrected "misssing" typo in the error message.
echo "missing INDEXFILE ${INDEXFILE} in ${builddir}/ports"
return 1
fi
# "non-existent" entries mean a port referenced a dependency that is
# not present in the tree.
if grep -q non-existent ${INDEXFILE}; then
echo "errors in INDEX:"
grep -n non-existent ${INDEXFILE}
return 1
fi
# Every INDEX line must have exactly 13 pipe-separated fields.
if ! awk -F '|' '{if (NF != 13) { error=1; printf("line %d: %s\n", NR, $0)}} END {if (error == 1) exit(1)}' ${INDEXFILE}; then
echo "error in INDEX"
return 1
fi
}
# usage: makeduds pb arch branch builddir
# note: can take ~21 minutes!
# NOTE(review): the usage line above is stale - the function actually
# takes (pb arch branch buildid builddir).
makeduds () {
pb=$1
arch=$2
branch=$3
buildid=$4
builddir=$5
cd ${builddir}/ports
echo "================================================"
echo "generating duds"
echo "================================================"
echo "duds generation started at $(date)"
# Keep the previous duds list around so we can diff against it below.
if [ -e ${builddir}/duds ]; then
cp -p ${builddir}/duds ${builddir}/duds.old
fi
if ! ${pb}/scripts/makeduds ${arch} ${branch} ${buildid}; then
echo "error(s) detected, exiting script at $(date). Failed duds list was:"
cat ${builddir}/duds
mailexit 1
fi
echo "duds generation ended at $(date)"
echo $(wc -l ${builddir}/duds | awk '{print $1}') "items in duds"
if [ -f ${builddir}/duds.old ]; then
echo "duds diff:"
diff ${builddir}/duds.old ${builddir}/duds
else
echo "no previous duds to compare against."
fi
# duds.orig is the pristine copy restored after -continue runs.
cp -p ${builddir}/duds ${builddir}/duds.orig
}
# usage: restrictedlist pb arch branch builddir
# note: can take ~25 minutes!
# NOTE(review): the usage line above is stale - the function actually
# takes (pb arch branch buildid builddir).
restrictedlist () {
pb=$1
arch=$2
branch=$3
buildid=$4
builddir=$5
cd ${builddir}/ports
echo "================================================"
echo "creating restricted list"
echo "================================================"
echo "restricted list generation started at $(date)"
${pb}/scripts/makerestr ${arch} ${branch} ${buildid} || return 1
echo "restricted list generation ended at $(date)"
echo $(grep -c '^#' ${builddir}/restricted.sh) "ports in ${builddir}/restricted.sh"
}
# usage: cdromlist pb arch branch builddir
# note: can take ~48 minutes!
# Generate cdrom.sh, a script that deletes ports not distributable on
# CDROM, rewriting the canonical paths to this build's layout.
cdromlist () {
pb=$1
arch=$2
branch=$3
builddir=$4
cd ${builddir}/ports
echo "================================================"
echo "creating cdrom list"
echo "================================================"
echo "cdrom list generation started at $(date)"
make ECHO_MSG=true clean-for-cdrom-list \
| sed -e "s./usr/ports/distfiles/./distfiles/.g" \
-e "s./usr/ports/./${branch}/.g" \
> ${builddir}/cdrom.sh
echo "cdrom list generation ended at $(date)"
echo $(grep -c '^#' ${builddir}/cdrom.sh) "ports in ${builddir}/cdrom.sh"
}
# XXX Should use SHA256 instead, but I'm not sure what consumes this file (if anything)
# XXX Should generate these as the packages are copied in, instead of all at once at the end
# usage: generatemd5 pb arch branch builddir
# Write CHECKSUM.MD5 covering every package tarball under packages/All.
generatemd5 () {
pb=$1
arch=$2
branch=$3
builddir=$4
echo "started generating CHECKSUM.MD5 at $(date)"
cd ${builddir}/packages/All
# Strip the leading "./" that find produces, then checksum in sorted order.
find . -name '*.tbz' | sed -e 's,^\./,,' | sort | xargs md5 > CHECKSUM.MD5
echo "ended generating CHECKSUM.MD5 at $(date)"
}
# usage: dobuild pb arch branch builddir
# Run the actual package build via the queue manager, then report
# timing and size statistics and any ports that timed out.
dobuild() {
pb=$1
arch=$2
branch=$3
builddir=$4
echo "================================================"
echo "building packages"
echo "================================================"
echo "started at $(date)"
start=$(date +%s)
# All build output goes to the journal; stdin is detached.
${QMANAGER_PATH}/packagebuild ${arch} ${branch} ${buildid} > ${builddir}/${journalname} 2>&1 < /dev/null
result=$?
if [ $result -ne 0 ]; then
echo "ERROR: packagebuild ${arch} ${branch} ${buildid} failed: see ${builddir}/${journalname} for details"
fi
echo "ended at $(date)"
end=$(date +%s)
echo "Build took $(date -u -j -r $((end - start)) | awk '{print $4}')"
echo $(ls -1 ${builddir}/packages/All | grep tbz | wc -l) "packages built"
echo $(wc -l ${PORTSDIR}/${INDEXFILE} | awk '{print $1}') "lines in INDEX"
echo $(echo $(du -sk ${builddir}/packages | awk '{print $1}') / 1024 | bc) "MB of packages"
echo $(echo $(du -sk ${builddir}/distfiles | awk '{print $1}') / 1024 | bc) "MB of distfiles"
cd ${builddir}
# Surface any ports killed by the build timeout watchdogs.
if grep -qE '(ptimeout|pnohang): killing' ${journalname}; then
echo "The following port(s) timed out:"
grep -E '(ptimeout|pnohang): killing' ${journalname} | sed -e 's/^.*ptimeout:/ptimeout:/' -e 's/^.*pnohang:/pnohang:/'
fi
}
me=$(hostname)
starttime=$(date +%s)
# Output doubles as the body of the build log mail.
echo "Subject: $me package building logs"
echo
echo "Called with arguments: $@"
echo "Started at ${starttime}"
# Default every option flag to off before parsing the command line.
nobuild=0
noindex=0
noduds=0
nosrc=0
srccvs=0
noports=0
portscvs=0
norestr=0
nochecksubdirs=0
noplistcheck=0
cdrom=0
restart=0
cont=0
finish=0
nofinish=0
nodistfiles=0
fetch_orig=0
trybroken=0
incremental=0
keep=0
nocleanup=0
# optional arguments
# Each recognized flag sets its corresponding variable; anything else
# prints the usage summary and aborts.
while [ $# -gt 0 ]; do
case "x$1" in
x-nobuild)
nobuild=1
;;
x-noindex)
noindex=1
;;
x-noduds)
noduds=1
;;
x-cdrom)
cdrom=1
;;
x-nosrc)
nosrc=1
;;
x-srccvs)
srccvs=1
;;
x-noports)
noports=1
;;
x-portscvs)
portscvs=1
;;
x-norestr)
norestr=1
;;
x-nochecksubdirs)
nochecksubdirs=1
;;
x-noplistcheck)
noplistcheck=1
;;
x-nodistfiles)
nodistfiles=1
;;
x-fetch-original)
fetch_orig=1
;;
x-trybroken)
trybroken=1
;;
x-continue)
cont=1
;;
x-restart)
restart=1
;;
x-nofinish)
nofinish=1
;;
x-finish)
# -finish implies skipping the build itself.
nobuild=1
finish=1
;;
x-incremental)
incremental=1
;;
x-keep)
keep=1
;;
x-nocleanup)
nocleanup=1
;;
*)
usage
;;
esac
shift
done
# Restarted, continued, or finish-only runs reuse an existing build
# instead of cloning a fresh one.
if [ "$restart" = 1 -o "$cont" = 1 -o "$finish" = 1 ]; then
skipstart=1
else
skipstart=0
fi
# XXX check for conflict between -noports and -portscvs etc
# We have valid options, start the build
# Export the flags consumed by the per-port build scripts.
if [ "$nodistfiles" = 1 ]; then
export NO_DISTFILES=1
fi
if [ "$noplistcheck" = 1 ]; then
export NOPLISTCHECK=1
fi
if [ "$cdrom" = 1 ]; then
export FOR_CDROM=1
fi
if [ "$fetch_orig" = 1 ]; then
export FETCH_ORIGINAL=1
fi
if [ "$trybroken" = 1 ]; then
export TRYBROKEN=1
fi
# Start setting up build environment
if [ "${skipstart}" -eq 0 ]; then
newbuildid=${datestamp}
# this is where the latest/previous dance is performed
# MCL note 20091109: buildid must exist.  For now, use the following
# MCL manual command to start new buildenvs, before the first use of
# MCL dopackages: "build create arch branch"
build clone ${arch} ${branch} ${buildid} ${newbuildid}
buildid=${newbuildid}
fi
builddir=${pbab}/builds/${buildid}
# bomb out if build clone failed
if [ ! -d ${builddir} ]; then
mailexit 1
fi
# Set up our environment variables
buildenv ${pb} ${arch} ${branch} ${builddir}
# XXX MCL might not return 'latest' ???
echo | mail -s "$(basename $0) started for ${arch}-${branch} ${buildid} at $(date)" ${mailto}
# make necessary subdirectories if they don't exist
mkdir -p ${builddir}/bak/restricted || mailexit 1
if [ "${keep}" -eq 1 ]; then
touch ${builddir}/.keep
fi
# Mark as active so that it is not automatically cleaned up on the
# clients
touch ${builddir}/.active
# Update link to current logfile created by dopackages.wrapper
ln -sf ${pb}/${arch}/archive/buildlogs/log.${branch}.${datestamp} \
${builddir}/build.log
if [ "$skipstart" = 0 ]; then
# Update build
if [ "$incremental" = 1 ]; then
# Stash a copy of the index since we may be about to replace
# it with the ZFS update
if [ -f ${PORTSDIR}/${INDEXFILE} ]; then
cp ${PORTSDIR}/${INDEXFILE} ${builddir}/bak/${INDEXFILE}
fi
fi
if [ ${noports} -eq 0 ]; then
if [ -L ${builddir}/ports -o ${portscvs} -eq 1 ]; then
echo "================================================"
echo "running cvs update -PAd on ${PORTSDIR}"
echo "================================================"
cd ${PORTSDIR}
updated=$(date)
echo ${updated} > ${builddir}/.updated
cvs -Rq update -PdA -D "${updated}"
# XXX Check for conflicts
else
# echo "XXX at build portsupdate portsupdate ${arch} ${branch} ${buildid} $@ "
build portsupdate ${arch} ${branch} ${buildid} $@
# echo "XXX past build portsupdate portsupdate ${arch} ${branch} ${buildid} $@ "
fi
else
# XXX MCL why???
# XXX rm -f ${builddir}/.updated
# Fix: this else branch contained only comments, which is a syntax
# error in sh; keep a no-op so the script parses.
:
fi
if [ "$incremental" = 1 ]; then
if [ -f ${builddir}/bak/${INDEXFILE} ]; then
cp ${builddir}/bak/${INDEXFILE} ${PORTSDIR}/${INDEXFILE}.old
fi
fi
# Create tarballs for distributing to clients.  Should not cause
# much extra delay because we will do this in conjunction with
# recursing over the ports tree anyway just below, and might have
# just finished cvs updating, so it is likely to be in cache.
portstar &
if [ ${nosrc} -eq 0 ]; then
if [ -L ${builddir}/src -o ${srccvs} -eq 1 ]; then
echo "================================================"
echo "running cvs update -PAd on ${SRC_BASE}"
echo "================================================"
cd ${SRC_BASE}
if [ -z "${updated}" ]; then
# Don't overwrite/create .updated if we didn't set it
# with the ports update
updated=$(date)
fi
cvs -Rq update -PdA -D "${updated}"
# XXX Check for conflicts
else
build srcupdate ${arch} ${branch} ${buildid} $@
fi
fi
srctar &
# Begin build preprocess
cd ${PORTSDIR}
if [ "$nochecksubdirs" = 0 ]; then
echo "================================================"
echo "running make checksubdirs"
echo "================================================"
make checksubdirs
fi
# XXX MCL could background these?
# not run in background to check return status
if [ "$noindex" = 0 ]; then
makeindex ${pb} ${arch} ${branch} ${buildid} ${builddir} || mailexit 1
fi
checkindex ${builddir} || mailexit 1
if [ "$noduds" = 0 ]; then
makeduds ${pb} ${arch} ${branch} ${buildid} ${builddir} || mailexit 1
fi
wait # for tar creation
if [ "$trybroken" = 1 ]; then
echo "================================================"
echo "pruning stale entries from the failed ports list"
echo "================================================"
# XXX failure and newfailure are arch/branch-global for now.  We
# will need to work out how to deal with updates from
# concurrent builds though (one build may fail after a more
# recent build has fixed the breakage)
if [ -f ${pbab}/failure ]; then
cp ${pbab}/failure ${builddir}/bak/
fi
if [ -f ${pbab}/newfailure ]; then
cp ${pbab}/newfailure ${builddir}/bak/
fi
lockf -k ${pbab}/failure.lock ${pb}/scripts/prunefailure ${arch} ${branch} ${builddir}
fi
# XXX These can happen after build start
if [ "$norestr" = 0 ]; then
restrictedlist ${pb} ${arch} ${branch} ${buildid} ${builddir} &
job_restrictedlist=$!
fi
if [ "$cdrom" = 1 ]; then
cdromlist ${pb} ${arch} ${branch} ${builddir} &
job_cdromlist=$!
fi
cd ${builddir}
# Recycle the old distfiles dir; the rm runs in the background.
if [ -d distfiles ]; then
mv distfiles .distfiles~
rm -rf .distfiles~ &
fi
mkdir -p distfiles/
olderrors=$(readlink ${builddir}/errors)
oldlogs=$(readlink ${builddir}/logs)
# XXX MCL hardcoding of archive/errorlogs
newerrors=${pb}/${arch}/archive/errorlogs/e.${branch}.${buildid}
newlogs=${pb}/${arch}/archive/errorlogs/a.${branch}.${buildid}
# Cycle out the previous symlinks
rm -f bak/errors
rm -f bak/logs
if [ -e errors ]; then
mv errors bak/
fi
if [ -e logs ]; then
mv logs bak/
fi
# Create new log directories for archival
rm -rf ${newerrors}
mkdir -p ${newerrors}
ln -sf ${newerrors} ${builddir}/errors
rm -rf ${newlogs}
mkdir -p ${newlogs}
ln -sf ${newlogs} ${builddir}/logs
echo "error logs in ${newerrors}"
# Propagate build metadata into the new log directories.
if [ -f "${builddir}/.updated" ]; then
cp -p ${builddir}/.updated ${newerrors}/.updated
cp -p ${builddir}/.updated ${newlogs}/.updated
else
rm -f ${newerrors}/.updated ${newlogs}/.updated
fi
cp -p ${builddir}/duds ${newerrors}/duds
cp -p ${builddir}/duds ${newlogs}/duds
if [ -f "${builddir}/duds.verbose" ]; then
cp -p ${builddir}/duds.verbose ${newerrors}/duds.verbose
cp -p ${builddir}/duds.verbose ${newlogs}/duds.verbose
fi
cp -p ${builddir}/ports/${INDEXFILE} ${newerrors}/INDEX
cp -p ${builddir}/ports/${INDEXFILE} ${newlogs}/INDEX
if [ "$incremental" = 1 ]; then
# Copy back in the restricted packages that were saved after the
# previous build
if [ -d ${builddir}/bak/restricted/ ]; then
cd ${builddir}/bak/restricted
find . | cpio -dumpl ${builddir}
fi
cd ${builddir}
# Create hardlinks to previous set of logs
if [ ! -z "${oldlogs}" -a -d ${oldlogs} ]; then
cd ${oldlogs} && find . -name \*.log\* | cpio -dumpl ${newlogs}
fi
if [ ! -z "${olderrors}" -a -d ${olderrors} ]; then
cd ${olderrors} && find . -name \*.log\* | cpio -dumpl ${newerrors}
fi
# Identify the ports that have changed and thus whose packages
# need to be removed before rebuilding
cd ${PORTSDIR}
if [ -f ${INDEXFILE}.old ]; then
cut -f 1,2,3,8,9,11,12,13 -d \| ${INDEXFILE}.old | sort > ${INDEXFILE}.old1
cut -f 1,2,3,8,9,11,12,13 -d \| ${INDEXFILE} | sort > ${INDEXFILE}.1
comm -2 -3 ${INDEXFILE}.old1 ${INDEXFILE}.1 | cut -f 1 -d \| > ${builddir}/.oldports
echo "Removing $(wc -l ${builddir}/.oldports | awk '{print $1}') packages in preparation for incremental build"
rm ${INDEXFILE}.old1 ${INDEXFILE}.1
cd ${PACKAGES}/All
sed "s,$,${PKGSUFFIX}," ${builddir}/.oldports | xargs rm -f
# XXX MCL takes an unknown period of time.
# XXX MCL return value not checked.
${pb}/scripts/prunepkgs ${PORTSDIR}/${INDEXFILE} ${PACKAGES}
cd ${builddir}/errors/
sed "s,\$,.log," ${builddir}/.oldports | xargs rm -f
sed "s,\$,.log.bz2," ${builddir}/.oldports | xargs rm -f
cd ${builddir}/logs/
sed 's,$,.log,' ${builddir}/.oldports | xargs rm -f
sed 's,$,.log.bz2,' ${builddir}/.oldports | xargs rm -f
fi
else
# Non-incremental: start from an empty package area.
cd ${builddir}
if [ -d packages ]; then
# echo "XXX at mv packages .packages~"
mv packages .packages~
rm -rf .packages~ &
# echo "XXX past mv packages .packages~"
fi
mkdir -p packages/All
fi
wait $job_restrictedlist || mailexit 1
wait $job_cdromlist || mailexit 1
fi # if [ "$skipstart" = 0 ]
# only need to wait for some tasks, so this is probably redundant.
wait
if [ "$nobuild" = 0 ]; then
cd ${builddir}
if [ "$cont" = 1 ]; then
# -continue: add already-failed ports to duds so they are skipped.
find errors/ -name \*.log | sed -e 's,\.log$,,' -e 's,^errors/,,' > duds.errors
cat duds duds.errors | sort -u > duds.new
mv duds.new duds
else
cp duds.orig duds
fi
dobuild ${pb} ${arch} ${branch} ${builddir}
fi
# Clean up temporary duds file
if [ "$cont" = 1 ]; then
cp duds.orig duds
fi
cd ${builddir}/packages/All
if [ "$nofinish" = 0 ]; then
rm -f Makefile
if [ "$norestr" = 0 ]; then
# Before deleting restricted packages, save a copy so we don't
# have to rebuild them next time
${pb}/scripts/keeprestr ${arch} ${branch} ${buildid}
else
rm -rf ${builddir}/bak/restricted/
fi
# Always delete restricted packages/distfiles since they're
# published on the website
echo "deleting restricted ports"
sh ${builddir}/restricted.sh
if [ "$cdrom" = 1 ]; then
echo "deleting cdrom restricted ports"
sh ${builddir}/cdrom.sh
fi
# Remove packages not listed in INDEX
${pb}/scripts/prunepkgs ${builddir}/ports/${INDEXFILE} ${builddir}/packages
fi
# XXX Checking for bad packages should be done after the package is uploaded
#rm -rf ${builddir}/bad
#mkdir -p ${builddir}/bad
#echo "checking packages"
#for i in *${PKGSUFFIX}; do
# if ! ${PKGZIPCMD} -t $i; then
# echo "Warning: package $i is bad, moving to ${builddir}/bad"
# # the latest link will be left behind...
# mv $i ${builddir}/bad
# rm ../*/$i
# fi
#done
if [ "$nofinish" = 0 ]; then
generatemd5 ${pb} ${arch} ${branch} ${builddir} &
# Remove INDEX entries for packages that do not exist
${pb}/scripts/chopindex ${builddir}/ports/${INDEXFILE} ${builddir}/packages > ${builddir}/packages/INDEX
# Copy UPDATING and MOVED into the packages folder
cp ${builddir}/ports/UPDATING ${builddir}/packages/UPDATING
cp ${builddir}/ports/MOVED ${builddir}/packages/MOVED
# Publish compressed copies with checksums alongside the originals.
for f in INDEX MOVED UPDATING; do
bzip2 -k ${builddir}/packages/$f
md5 ${builddir}/packages/$f.bz2 > ${builddir}/packages/$f.bz2.md5
sha256 ${builddir}/packages/$f.bz2 > ${builddir}/packages/$f.bz2.sha256
done
ls -asFlrt ${builddir}/packages/All > ${builddir}/logs/ls-lrt
cp -p ${builddir}/${journalname} ${builddir}/logs
echo "================================================"
echo "copying distfiles"
echo "================================================"
echo "started at $(date)"
cd ${builddir}
${pb}/scripts/dodistfiles ${arch} ${branch} ${buildid}
# Always delete restricted distfiles
echo "deleting restricted distfiles"
sh ${builddir}/restricted.sh
if [ "$cdrom" = 1 ]; then
echo "deleting cdrom restricted distfiles"
sh ${builddir}/cdrom.sh
fi
wait
fi
if [ "${nocleanup}" -eq 1 ]; then
echo "Not cleaning up build, when you are finished be sure to run:"
echo " ${pb}/scripts/build cleanup ${arch} ${branch} ${buildid} -full"
else
${pb}/scripts/build cleanup ${arch} ${branch} ${buildid} -full
fi
endtime=$(date +%s)
echo "================================================"
echo "all done at $(date)"
echo "entire process took $(date -u -j -r $(($endtime - $starttime)) | awk '{print $4}')"
echo "================================================"
exit 0

View File

@ -1,54 +0,0 @@
#!/bin/sh
# $FreeBSD$
# server-side script which wraps the dopackages script
# NOTE: rearranged 20100615 to not have to be linked to dopackages.N
# configurable variables
pb=/var/portbuild
if [ $# -lt 3 ]; then
echo "usage: $0 <arch> <branch> <buildid> [<args> ...]"
exit 1
fi
arch=$1
branch=$2
buildid=$3
shift 3
. ${pb}/conf/server.conf
. ${pb}/scripts/buildenv
if ! validate_env ${arch} ${branch} ; then
echo "dopackages.wrapper: invalid build environment: \"${arch}/${branch}\""
exit 1
fi
buildid2=$(resolve ${pb} ${arch} ${branch} ${buildid})
if [ -z "${buildid2}" ]; then
echo "dopackages.wrapper: build ID \"${buildid}\" does not exist for \"${arch}/${branch}\""
exit 1
fi
lock=${pb}/${arch}/${branch}/${buildid2}.lock
# datestamp will be used as buildid iff the latest/previous dance
# is needed, but in any case, it distinguishes all logfile names
# (e.g. for restarted and continued builds, there will be unique
# lognames for each attempt.)
datestamp=$(date '+%Y%m%d%H%M%S')"."`hostname -s`
logdir=${pb}/${arch}/archive/buildlogs
if [ ! -d ${logdir} ]; then
echo "use 'build create' to create the ${logdir} directory before trying dopackages"
exit 1
fi
logname=${logdir}/log.${branch}.${datestamp}
# Run dopackages under the per-build lock, teeing output to the log.
# NOTE(review): the pipeline's exit status is tee's, not dopackages',
# so the "build failed" branch below is unlikely to ever trigger.
dorun() {
lockf -k -t 0 ${lock} ${pb}/scripts/dopackages ${arch} ${branch} ${buildid2} ${datestamp} $@ 2>&1 \
| tee ${logname}
}
dorun $@ || (echo "dopackages.wrapper: build failed."; exit 1)
View File

@ -1,71 +0,0 @@
#!/bin/sh
# Legacy cron wrapper: builds packages for the 4, 5 and 3 branches in
# sequence, mailing each run's log to $mailto, then generates the
# error-log comparison reports.  A lock file prevents overlapping
# runs; a status file written by dopackages aborts the sequence early
# with dopackages' status as the exit code.

# configurable variables
pb=/var/portbuild
. ${pb}/portbuild.conf

lock=${pb}/lock
status=${pb}/status
date=$(date '+%Y%m%d%H')
shortdate=$(date '+%Y%m%d')

if [ -e ${lock} ]; then
    # echo "Skipped package build since lock file exists" | sendmail $mailto
    exit 1
fi
touch ${lock}
rm -f ${status}
mkdir -p ${pb}/archive/buildlogs

# Run one branch build.  Installs any staged dopackages update (checked
# before every branch so a fix can land mid-sequence), points the
# convenience symlinks at this run's logfile, runs dopackages with the
# log mailed to $mailto, and exits the whole script if dopackages left
# a status file behind.
# usage: run_branch <branch> [dopackages-flags ...]
run_branch() {
    branch=$1
    shift
    if [ -f ${pb}/scripts/dopackages.new ]; then
        mv -f ${pb}/scripts/dopackages.new ${pb}/scripts/dopackages
    fi
    ln -sf ${pb}/archive/buildlogs/log.${branch}.${date} ${pb}/${branch}/build.log
    ln -sf log.${branch}.${date} ${pb}/archive/buildlogs/log.${branch}.${shortdate}
    ${pb}/scripts/dopackages "$@" ${branch} ${date} 2>&1 \
        | tee ${pb}/archive/buildlogs/log.${branch}.${date} \
        | sendmail $mailto
    if [ -f ${status} ]; then
        rm -f ${lock}
        exit "$(cat ${status})"
    fi
}

# Only the first branch updates the tree; the others reuse it.
run_branch 4 "$@"
run_branch 5 -nocvsup "$@"
run_branch 3 -nocvsup "$@"

cd ${pb}/archive/errorlogs/e.3.${date} && ${pb}/scripts/processlogs
cd ${pb}/archive/errorlogs/e.4.${date} && ${pb}/scripts/processlogs
cd ${pb}/archive/errorlogs/e.5.${date} && ${pb}/scripts/processlogs
cd ${pb}/archive/errorlogs
${pb}/scripts/comparelogs e.3.${date} e.4.${date}
${pb}/scripts/comparelogs e.4.${date} e.3.${date}
${pb}/scripts/comparelogs e.5.${date} e.4.${date}
${pb}/scripts/comparelogs e.4.${date} e.5.${date}
${pb}/scripts/bothlogs e.3.${date} e.4.${date} e.5.${date}
rm -f ${lock}

View File

@ -1,384 +0,0 @@
#!/bin/sh
# $FreeBSD$
#
# create HTML showing numbers of packages vs errors. Run this in a directory
# accessible to the web server.
#
pb=/var/portbuild
. ${pb}/conf/server.conf

# Build the page in a hidden temp file beside the final output so a
# half-written report is never served; the output name is derived from
# the script name, e.g. "dopackagestats" -> "packagestats.html".
here=`pwd`
tmp=`basename $0 | sed -e "s/^do//"`".html"
OUTFILE="${here}/${tmp}"
TMPFILE="${here}/.${tmp}"

# name of the per-build journal file consulted for queue statistics
#journalname="make"
journalname="journal"

# stylesheet seems like overkill for something this simple
TABLEBGCOLOR="#F0F0F0"
THCOLOR="#E0E0FF"
TDCOLOR_DONE="lightgreen"
TDCOLOR_NOT_DONE="lightyellow"
# subroutines
write_header () {
    # Start a fresh report: truncate the work file and emit the static
    # HTML preamble plus a generation timestamp.
    cat > ${TMPFILE} <<EOF
<html>
<head>
<title>FreeBSD package building statistics</title>
</head>
<body>
<h1>FreeBSD package building statistics</h1>
<p>as of `date`</p>
EOF
}
write_table_begin () {
    # Open a statistics table and emit its header row: one fixed-width
    # spacer cell, then one <th> per column.
    # MCL removed 20090808 -- this takes way too long:
    #   <th width='60' bgcolor='$THCOLOR'>latest log</th>
    cat >> ${TMPFILE} <<EOF
<table border='1' cellpadding='4' cellspacing='1' bgcolor='$TABLEBGCOLOR'>
<tr>
<td align='left' width='240' bgcolor='$TABLEBGCOLOR'>&nbsp;</td>
<th width='60' bgcolor='$THCOLOR'>updated</th>
<th bgcolor='$THCOLOR'>INDEX</th>
<th bgcolor='$THCOLOR'>build logs</th>
<th bgcolor='$THCOLOR'>packages</th>
<th bgcolor='$THCOLOR'>errors</th>
<th bgcolor='$THCOLOR'>skipped</th>
<th bgcolor='$THCOLOR'>not yet built</th>
<th bgcolor='$THCOLOR'>queue length</th>
<th bgcolor='$THCOLOR'>running?</th>
<th bgcolor='$THCOLOR'>completed?</th>
</tr>
EOF
}
# Emit one table row for the latest build of <arch>/<build>: gathers
# counts from the build directory (INDEX, logs, packages, errors,
# duds), derives "not yet built" and queue statistics, then writes the
# HTML cells.  Returns without output if no data files exist at all.
# usage: write_row <arch> <build>
write_row () {
    # first, gather data
    arch=$1
    build=$2
    directory=${pb}/${arch}/${build}/builds/latest
    journal=${directory}/${journalname}
    # branch is the leading numeric component of the build name
    # (e.g. "8" from "8-exp").
    branch=`echo $build | awk -F '-' '{print $1}'`
    if [ "$branch" = "4" ]; then
        indexfile=$directory/ports/INDEX
    else
        indexfile=$directory/ports/INDEX-$branch
    fi
    # work around the fact that 5-exp is really 6-exp-prime
    if [ ! -f $indexfile ]; then
        if [ -d $directory/ports ]; then
            indexfile=$directory/ports/`cd $directory/ports 2> /dev/null && ls INDEX* 2> /dev/null | head -1`
        else
            # work around the fact that 4 is EOL and thus has no ports/ directory
            indexfile=$directory/logs/`cd $directory/logs 2> /dev/null && ls INDEX* 2> /dev/null | head -1`
        fi
    fi
    # column: date of ports update
    have_updated=""
    updated=""
    if [ -f $directory/ports/.updated ]; then
        updated="$(cat $directory/ports/.updated | awk '{printf("%s %s\n",$2,$3)}')"
        if [ ! -z "$updated" ]; then
            have_updated="yes"
        fi
    fi
    # column: datestamp and URL of latest log
    have_latest=""
    latest=""
    # MCL removed 20090808 -- this takes way too long
    # if [ -d $directory/logs ]; then
    # latest_suffix="$(cd $directory/logs 2> /dev/null && ls -rtTl | grep '\.log' | tail -1 | awk '{printf("%s\">%s %s</a>\n",$10,$6,$7)}')"
    # if [ -z "$latest_suffix" ]; then
    # latest="<a href=\"http://${MASTER_URL}/errorlogs/$arch-$build-latest-logs/$latest_suffix"
    # have_latest="yes"
    # fi
    # fi
    # column: INDEX count
    have_index=""
    n_index=0
    if [ -f $indexfile ]; then
        n_index=`cat $indexfile | wc -l`
        have_index="yes"
    fi
    # column: buildlog count
    have_logs=""
    n_logs=0
    if [ -d $directory/logs ]; then
        n_logs=`ls $directory/logs | grep '\.log' | wc -l`
        have_logs="yes"
    fi
    # column: package count
    have_packages=""
    n_packages=0
    if [ -d $directory/packages/All ]; then
        # MCL removed 20090808 -- this takes way too long
        # n_packages=`find $directory/packages/All -name \*.tbz -or -name \*.tgz |wc -l`
        n_packages=`ls $directory/packages/All | grep -v CHECKSUM.MD5 | wc -l`
        have_packages="yes"
    fi
    # column: error count
    have_errors=""
    n_errors=0
    if [ -d $directory/errors ]; then
        n_errors=`ls $directory/errors | grep '\.log' | wc -l`
        have_errors="yes"
    fi
    # column: duds count (ports skipped via IGNORE/BROKEN/etc.)
    have_duds=""
    n_duds=0
    if [ -f $directory/duds ]; then
        n_duds=`cat $directory/duds | wc -l`
        have_duds="yes"
    fi
    # if do not have any files, skip the row
    if [ -z "$have_updated" -a \
         -z "$have_latest" -a \
         -z "$have_index" -a \
         -z "$have_logs" -a \
         -z "$have_packages" -a \
         -z "$have_errors" -a \
         -z "$have_duds" ]; then
        return
    fi
    # column: not yet built count
    # MCL 20080916 use n_packages, not n_logs; individual logs can be stale.
    # (OTOH, so can packages (see sparc64-5) but this is possibly obsolete)
    have_not_yet_built=""
    n_not_yet_built=0
    if [ ! -z "$have_index" -a \
         ! -z "$have_packages" -a \
         ! -z "$have_errors" -a \
         ! -z "$have_duds" ]; then
        if [ $n_index -ne 0 ]; then
            n_not_yet_built=`expr $n_index - $n_packages - $n_errors - $n_duds`
            have_not_yet_built="yes"
        # else index currently being rebuilt
        fi
    fi
    # column: running flag
    running_flag="N"
    # the last grep eliminates false positive of i386-6-xyz for i386-6;
    # if we are still running FreeBSD in the year 3000, s/2/3/
    running_processes_for_build=`ps axww | \
        grep "pdispatch $arch $build " | \
        grep -v grep | \
        sed -e "s@.*pdispatch @@;s@ /var/portbuild/scripts/.*@@;s@ @-@g" | \
        grep "^$arch-$build-2"`
    if [ ! -z "$running_processes_for_build" ]; then
        running_flag="Y"
    fi
    # column: completed flag
    completed_flag="N"
    if [ -f $directory/build.log ]; then
        if [ ! -z "`grep 'all done at ' $directory/build.log`" ]; then
            completed_flag="Y"
        fi
    fi
    # decorate the row to make everything less "gray"
    if [ "$running_flag" = "N" -a "$completed_flag" = "Y" ]; then
        cellcolor=$TDCOLOR_DONE
    else
        cellcolor=$TDCOLOR_NOT_DONE
    fi
    # queue length -PAV-
    # Scrape the last MASTER/Queue line of the journal for the live
    # "remaining" and queue-length figures of an in-progress build.
    m_not_yet_built=""
    queue_length=""
    if [ "$completed_flag" = "N" -a -f $journal ]; then
        m_not_yet_built=`tail -n 1000 $journal | grep MASTER | grep Queue | tail -1 | sed 's@.*remaining=@@ ; s@, Queue.*@@'`
        queue_length=`tail -n 1000 $journal | grep MASTER | grep Queue | tail -1 | sed 's@.*length=@@'`
    fi;
    # now write the row
    echo "<tr>" >> ${TMPFILE}
    echo "<th align='left' bgcolor='$THCOLOR'>$arch-$build</th>" >> ${TMPFILE}
    echo "<td align='left' bgcolor='$cellcolor'>" >> ${TMPFILE}
    if [ ! -z "$have_updated" ]; then
        echo "<a href='http://${MASTER_URL}/errorlogs/$arch-$build-latest/.updated'>" >> ${TMPFILE}
        echo "$updated</a>" >> ${TMPFILE}
    else
        echo "&nbsp;" >> ${TMPFILE}
    fi
    echo "</td>" >> ${TMPFILE}
    # MCL removed 20090808 -- this takes way too long
    # echo "<td align='left' bgcolor='$cellcolor'>" >> ${TMPFILE}
    # if [ ! -z "$have_latest" ]; then
    # echo "$latest" >> ${TMPFILE}
    # else
    # echo "&nbsp;" >> ${TMPFILE}
    # fi
    # echo "</td>" >> ${TMPFILE}
    # note: ports/INDEX-n is copied to a file called errorlogs/INDEX
    echo "<td align='left' bgcolor='$cellcolor'>" >> ${TMPFILE}
    if [ ! -z "$have_index" ]; then
        echo "<a href='http://${MASTER_URL}/errorlogs/$arch-$build-latest/INDEX'>" >> ${TMPFILE}
        echo "$n_index</a>" >> ${TMPFILE}
    else
        echo "&nbsp;" >> ${TMPFILE}
    fi
    echo "</td>" >> ${TMPFILE}
    echo "<td align='right' bgcolor='$cellcolor'>" >> ${TMPFILE}
    if [ ! -z "$have_logs" ]; then
        echo "<a href='http://${MASTER_URL}/errorlogs/$arch-$build-latest-logs'>" >> ${TMPFILE}
        echo "$n_logs</a>" >> ${TMPFILE}
    else
        echo "&nbsp;" >> ${TMPFILE}
    fi
    echo "</td>" >> ${TMPFILE}
    echo "<td align='right' bgcolor='$cellcolor'>" >> ${TMPFILE}
    if [ ! -z "$have_packages" ]; then
        echo "<a href='http://${MASTER_URL}/errorlogs/$arch-$build-packages-latest/All'>" >> ${TMPFILE}
        echo "$n_packages</a>" >> ${TMPFILE}
    else
        echo "&nbsp;" >> ${TMPFILE}
    fi
    echo "</td>" >> ${TMPFILE}
    echo "<td align='right' bgcolor='$cellcolor'>" >> ${TMPFILE}
    if [ ! -z "$have_errors" ]; then
        echo "<a href='http://${MASTER_URL}/errorlogs/$arch-$build-latest'>" >> ${TMPFILE}
        echo "$n_errors</a>" >> ${TMPFILE}
    else
        echo "&nbsp;" >> ${TMPFILE}
    fi
    echo "</td>" >> ${TMPFILE}
    echo "<td align='right' bgcolor='$cellcolor'>" >> ${TMPFILE}
    if [ ! -z "$have_duds" ]; then
        echo "<a href='http://${MASTER_URL}/errorlogs/$arch-$build-latest/duds.verbose'>" >> ${TMPFILE}
        echo "$n_duds</a>" >> ${TMPFILE}
    else
        echo "&nbsp;" >> ${TMPFILE}
    fi
    echo "</td>" >> ${TMPFILE}
    echo "<td align='right' bgcolor='$cellcolor'>" >> ${TMPFILE}
    if [ ! -z "$m_not_yet_built" ]; then
        echo "$n_not_yet_built" >> ${TMPFILE}
    else
        echo "&nbsp;" >> ${TMPFILE}
    fi
    echo "</td>" >> ${TMPFILE}
    echo "<td align='right' bgcolor='$cellcolor'>" >> ${TMPFILE}
    if [ ! -z "$queue_length" ]; then
        echo "$queue_length" >> ${TMPFILE}
    else
        echo "&nbsp;" >> ${TMPFILE}
    fi
    echo "</td>" >> ${TMPFILE}
    echo "<td align='center' bgcolor='$cellcolor'>$running_flag</td>" >> ${TMPFILE}
    echo "<td align='center' bgcolor='$cellcolor'>$completed_flag</td>" >> ${TMPFILE}
    echo "</tr>" >> ${TMPFILE}
}
write_table_end () {
    # Close the current table and add vertical spacing before the next one.
    printf '%s\n' "</table>" "<br>" >> ${TMPFILE}
}
write_footer () {
    # Append the column legend and close the document.  The here-doc
    # delimiter is quoted: the text contains no expansions.
    # MCL removed 20090808 -- this takes way too long:
    #   <li><b>latest log</b> is the date of the latest logfile.</li>
    cat >> ${TMPFILE} <<'EOF'
<p>explanation of columns:</p>
<ul>
<li><b>updated</b> is the date of the latest tree update done by the script. It may be inaccurate if a manual checkout was done later.</li>
<li><b>INDEX</b> is number of ports in the INDEX file built from the latest tree update.</li>
<li><b>build logs</b> is number of packages attempted. Note: if a run was restarted, you may see duplicates here.</li>
<li><b>packages</b> is number of packages successfully built. Note: if a run was restarted, you may see duplicates here.</li>
<li><b>errors</b> is number of packages that failed. Note: if a run was restarted, you may see duplicates here.</li>
<li><b>skipped</b> is number of packages that were skipped due to NO_PACKAGE, IGNORE, BROKEN, FORBIDDEN, and so forth ("duds" file).</li>
<li><b>not yet built</b> is the INDEX column minus the build logs plus the errors plus the skipped. These are packages that have not been built for one reason or another. Note: interrupted and/or restarted builds can make this number inaccurate because of the duplicates, above.</li>
<li><b>running</b> is whether there are still processes running.</li>
<li><b>completed</b> is whether that build terminated normally or not, as seen from the logfile.</li>
</ul>
EOF
    # no longer true 20080917
    #   <p>notes:</p>
    #   <ul>
    #   <li>on the -exp builds, editors/openoffice.org* are skipped to save time.</li>
    #   </ul>
    cat >> ${TMPFILE} <<'EOF'
</body>
</html>
EOF
}
# main
write_header
# display all the mainstream builds first
# (i.e. where build = branch, e.g. "7", "10")
for arch in ${SUPPORTED_ARCHS}; do
    cd ${pb}/${arch}
    # mainstream builds: directory name is exactly a branch number
    builds=`ls | \
        grep "${SRC_BRANCHES_PATTERN}$" | \
        sort -n`
    if [ ! -z "$builds" ]; then
        write_table_begin
        for build in ${builds}; do
            write_row ${arch} ${build}
        done
        write_table_end
    fi
done
# then display all the non-mainstream builds (probably only of interest
# to portmgr; would break up the logical flow of the above)
# examples: 8.1; 8-exp; 8-exp-gettext; 8.1R
for arch in ${SUPPORTED_ARCHS}; do
    cd ${pb}/${arch}
    # distinct branch prefixes that have suffixed builds (e.g. "8"
    # from "8-exp" or "8.1")
    branches=`ls | \
        grep "${SRC_BRANCHES_PATTERN}[-\.]" | \
        sed -e "s@[-\.].*@@" | \
        uniq | \
        sort -n`
    if [ ! -z "$branches" ]; then
        for branch in $branches; do
            # one table per branch, mainstream build excluded
            builds=`ls -d $branch* | \
                grep -v "${SRC_BRANCHES_PATTERN}$" | \
                sort`
            if [ ! -z "$builds" ]; then
                write_table_begin
                for build in ${builds}; do
                    write_row ${arch} ${build}
                done
                write_table_end
            fi
        done
    fi
done
write_footer
# atomically publish the finished report
mv -f ${TMPFILE} ${OUTFILE}

View File

@ -1,191 +0,0 @@
#!/bin/sh
# $FreeBSD$
# server-side script to set up an individual client
# XXX Use a worker pool that only runs N setups at once to avoid
# raping the server. Hard to do in shell?
# -norsync|-nocopy : Don't copy files, just clean up builds
#
# -force : force file copying/extraction even if it appears it is
# up-to-date
#
# NB: branch or buildid might be "-" to specify only to set up the
# scripts/ and ${arch}/ directories (e.g. after client reboot)
#
# usage: $0 <arch> <branch> <buildid> <nodelist|all> [flags ...]

# configurable variables
pb=/var/portbuild

arch=$1
branch=$2
buildid=$3
nodelist=$4
shift 4

. ${pb}/conf/server.conf
# per-arch config doubles as the arch-validity check
if [ -f ${pb}/${arch}/portbuild.conf ]; then
    . ${pb}/${arch}/portbuild.conf
else
    echo "Invalid arch ${arch}"
    exit 1
fi
. ${pb}/scripts/buildenv
# Check for non-fatal rsync errors
# Exit status 0 is success; 23 (partial transfer) is tolerated with a
# note; anything else is fatal.  Examines $? at entry, so call it
# immediately after the rsync.
checkerror() {
    rc=$?
    if [ "$rc" -eq 0 ]; then
        return 0
    elif [ "$rc" -eq 23 ]; then
        echo "Continuing..."
        return 0
    else
        echo "Aborting..."
        return 1
    fi
}
# Set up one client node: push the build scripts, configuration files
# and build tarballs to the node, running its setupnode script before
# and after the copy.  Runs in the background, one instance per node.
# Returns 1 (aborting this node's setup) on any failed step.
setup() {
    node=$1

    echo "setting up of $node started at $(date)"

    . ${pb}/${arch}/portbuild.conf
    . ${pb}/${arch}/portbuild.${node}

    ${scp_cmd} -q -p ${pb}/scripts/setupnode ${client_user}@${node}:/tmp || return 1

    client_setup="${ssh_cmd} -n ${client_user}@${node} sh /tmp/setupnode ${pb} ${arch} ${branch} ${buildid} ${scratchdir} \"${portsmd5}\" \"${srcmd5}\" \"${bindistmd5}\""
    args="${nocopy} ${force}"

    # BUGFIX: the failure branches below use { list; }, not ( subshell ).
    # A "return" inside a subshell only exits the subshell, so the old
    # form printed the message but carried on with the setup.
    ${client_setup} pre-copy ${args} || { echo "pre-copy for ${node} failed"; return 1; }

    if [ "${norsync}" -eq 0 ]; then
        rsync ${rsync_gzip} -e "${ssh_cmd}" -r -l -p --delete ${pb}/scripts \
            ${client_user}@${node}:${pb}/
        checkerror $? || { echo "Copying scripts to ${node} failed"; return 1; }

        # client.conf and common.conf can be symlinks outside this dir, so
        # copy the actual files
        rsync ${rsync_gzip} -e "${ssh_cmd}" -r -L -p --delete ${pb}/${arch}/*.conf \
            ${client_user}@${node}:${pb}/${arch}
        checkerror $? || { echo "copying *.conf to ${node} failed"; return 1; }

        # portbuild.* can be symlinks outside this dir, so copy the actual
        # files
        rsync ${rsync_gzip} -e "${ssh_cmd}" -r -L -p --delete ${pb}/${arch}/portbuild* \
            ${client_user}@${node}:${pb}/${arch}
        checkerror $? || { echo "copying portbuild* files to ${node} failed"; return 1; }

        if [ -f "${pb}/${arch}/clients/bindist-${node}.tar" ]; then
            rsync ${rsync_gzip} -e "${ssh_cmd}" -r -L -p --delete \
                ${pb}/${arch}/clients/bindist-${node}.tar \
                ${client_user}@${node}:${pb}/${arch}/clients/
            checkerror $? || { echo "Copying bindist-${node}.tar to ${node} failed"; return 1; }
        else
            echo "Host customization file not found: ${pb}/${arch}/clients/bindist-${node}.tar"
            return 1
        fi

        # buildid may be "-" to set up only scripts/ and ${arch}/
        if [ "${buildid}" != "-" ]; then
            rsync ${rsync_gzip} -e "${ssh_cmd}" -r -L -p \
                ${builddir}/ports-${buildid}.tbz \
                ${builddir}/ports-${buildid}.tbz.md5 \
                ${builddir}/src-${buildid}.tbz \
                ${builddir}/src-${buildid}.tbz.md5 \
                ${builddir}/bindist.tbz \
                ${builddir}/bindist.tbz.md5 \
                ${client_user}@${node}:${builddir}/
            checkerror $? || { echo "Copying build tarballs to ${node} failed"; return 1; }
        fi
    fi

    ${client_setup} post-copy ${args} || { echo "post-copy for ${node} failed"; return 1; }

    # -queue: release any queue-manager jobs blocked on this node
    if [ "${queue}" -eq 1 ]; then
        jobs=$(python /var/portbuild/evil/qmanager/qclient jobs | grep "${node}" | grep "${arch}/${branch}/${buildid} package" | awk '{print $1}' | tail +1)
        for j in ${jobs}; do
            python /var/portbuild/evil/qmanager/qclient release $j
        done
    fi

    # -full: wipe the client's build directory for a from-scratch setup
    if [ "${full}" -eq 1 ]; then
        ${ssh_cmd} ${client_user}@${node} ${sudo_cmd} rm -rf ${pb}/${arch}/${branch}/builds/${buildid}/.ready ${pb}/${arch}/${branch}/builds/${buildid} /tmp/.setup-${buildid}
    fi

    echo "setting up of $node ended at $(date)"
}
pbab=${pb}/${arch}/${branch}

# flag defaults
norsync=0
queue=0
full=0
# parse optional flags (see header comments for meanings)
while [ $# -ge 1 ]; do
    case $1 in
    -norsync|-nocopy)
        norsync=1
        nocopy=-nocopy
        ;;
    -queue)
        queue=1
        ;;
    -force)
        force=-force
        ;;
    -full)
        full=1
        ;;
    esac
    shift
done

# When copying, resolve the build ID and collect the MD5 checksums the
# client-side setupnode script uses to decide whether its tarballs are
# up to date.  (The md5(1) output format puts the digest in field 4.)
if [ "${norsync}" -eq 0 ]; then
    if [ "${branch}" != "-" -a "${buildid}" != "-" ]; then
        buildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
        if [ -z "${buildid}" ]; then
            echo "Invalid build ID ${buildid}"
            exit 1
        fi
        builddir=${pbab}/builds/${buildid}
        if [ ! -f ${builddir}/ports-${buildid}.tbz.md5 ]; then
            echo "ports-${buildid}.tbz.md5 not found"
            exit 1
        else
            portsmd5=$(awk '{print $4}' ${builddir}/ports-${buildid}.tbz.md5)
        fi
        if [ ! -f ${builddir}/src-${buildid}.tbz.md5 ]; then
            echo "src-${buildid}.tbz.md5 not found"
            exit 1
        else
            srcmd5=$(awk '{print $4}' ${builddir}/src-${buildid}.tbz.md5)
        fi
        if [ ! -f ${builddir}/bindist.tbz.md5 ]; then
            echo "bindist.tbz.md5 not found"
            exit 1
        else
            bindistmd5=$(awk '{print $4}' ${builddir}/bindist.tbz.md5)
        fi
    fi
fi

# "all" expands to every node in the arch's machine list
if [ "${nodelist}" = "all" ]; then
    nodelist=$(cat ${pb}/${arch}/mlist)
fi

# set up all nodes in parallel and wait for the stragglers
for node in ${nodelist}; do
    setup ${node} &
done
wait

View File

@ -1,20 +0,0 @@
#!/bin/sh
# $FreeBSD$
# client script to be manually run to flush squid cache, whenever needed
# note: uname is not being overridden
arch=$(uname -m)
pb=/var/portbuild
. ${pb}/${arch}/client.conf
. ${pb}/${arch}/portbuild.conf
host=$(hostname)
# pull in per-host overrides if present
test -f ${pb}/${arch}/portbuild.${host} && . ${pb}/${arch}/portbuild.${host}
# only act on nodes that actually run squid (squid_dir set in config);
# "poll" waits for the stop to take effect, then the swap index is
# truncated before restarting
if [ ! -z "${squid_dir}" ] ; then
    /usr/local/etc/rc.d/squid stop
    /usr/local/etc/rc.d/squid poll
    echo "" > ${squid_dir}/cache/swap.state
    /usr/local/etc/rc.d/squid start
fi

View File

@ -1,37 +0,0 @@
#!/bin/sh
# $FreeBSD$
# server-side script to save off RESTRICTED files
usage () {
    echo "usage: keeprestr arch branch buildid"
    exit 1
}
if [ $# -ne 3 ]; then
    usage
fi
arch=$1
branch=$2
buildid=$3
pb=/var/portbuild
. ${pb}/conf/server.conf
. ${pb}/scripts/buildenv
buildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
if [ -z "${buildid}" ]; then
    echo "Invalid build ID ${buildid}"
    exit 1
fi
builddir=${pb}/${arch}/${branch}/builds/${buildid}
cd ${builddir}
rm -rf bak/restricted
mkdir -p bak/restricted
# Parse restricted.sh (as generated by makerestr): split its ;-joined
# commands one per line, keep the "/bin/rm -f <path>" ones targeting
# packages/, strip the builddir prefix and trailing ")", then hard-link
# every file that actually exists into bak/restricted/ via cpio -l.
(tr ';' '\n' < restricted.sh | grep "/bin/rm -f" | awk '{print $3}' | grep packages/ | sed -e "s,${builddir}/,," -e 's,)$,,' | xargs ls -1 2>/dev/null) | cpio -dumpl bak/restricted/

View File

@ -1,53 +0,0 @@
#!/bin/sh
# Generate the "duds" files for a build: the list of ports that will
# not be packaged (IGNORE/BROKEN/etc.), plus the ports that depend on
# them, with per-port reasons in the .verbose variants.
usage () {
    echo "usage: makeduds arch branch buildid"
    exit 1
}
if [ $# -ne 3 ]; then
    usage
fi
# configurable variables
pb=/var/portbuild
arch=$1
branch=$2
buildid=$3
shift 3
builddir=${pb}/${arch}/${branch}/builds/${buildid}
. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv
# -j# to make duds
DUDSJOBS=4
buildenv ${pb} ${arch} ${branch} ${builddir}
duds=${builddir}/duds
index=${PORTSDIR}/${INDEXFILE}
# point the tree at nonexistent paths so nothing from the host leaks in
unset DISPLAY
export __MAKE_SHELL=/rescue/sh
export LOCALBASE=/nonexistentlocal
export LINUXBASE=/nonexistentlinux
export PKG_DBDIR=/nonexistentpkg
export PORT_DBDIR=/nonexistentport
cd ${PORTSDIR}
# duds.verbose: "port|reason" lines from the tree's ignore list
make -j${DUDSJOBS} ignorelist-verbose ECHO_MSG=true > ${duds}.verbose 2> /dev/null || exit 1
sort ${duds}.verbose > ${duds}.verbose.tmp
mv -f ${duds}.verbose.tmp ${duds}.verbose
# duds: just the port names
cut -f 1 -d \| ${duds}.verbose > ${duds}
cp ${duds} ${duds}.orig
# duds.full: the directly ignored ports plus every INDEX entry
# mentioning one of them (i.e. dependent ports)
grep -Ff ${duds}.orig ${index} | cut -f 1 -d \| > ${duds}.full
# ports only in duds.full (dependents) get a synthetic reason line
cat ${duds} ${duds}.full | sort | uniq -u | sed -e "s@\$@|IGNORE: dependent port@" > ${duds}.full.verbose.tmp
cat ${duds}.verbose ${duds}.full.verbose.tmp | sort > ${duds}.full.verbose
rm ${duds}.full.verbose.tmp

View File

@ -1,56 +0,0 @@
#!/bin/sh
# usage: $0 arch branch buildid
# Build the INDEX file for a build's ports tree, insulated from the
# host, then normalize its whitespace and paths.
# Don't want to pick up host customizations
export INDEX_PRISTINE=1
# Don't give luser advice if it fails
export INDEX_QUIET=1
# Concurrency of index build
export INDEX_JOBS=6
# For debugging purposes only
#export INDEX_VERBOSE=1
pb=/var/portbuild
usage () {
    echo "usage: makeindex arch branch buildid"
    exit 1
}
if [ $# -ne 3 ]; then
    usage
fi
arch=$1
branch=$2
buildid=$3
shift 3
builddir=${pb}/${arch}/${branch}/builds/${buildid}
. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv
# Set up the build env variables
buildenv ${pb} ${arch} ${branch} ${builddir}
unset DISPLAY
# Don't pick up installed packages from the host
export LOCALBASE=/nonexistentlocal
cd ${PORTSDIR}
make index
if [ ! -e ${INDEXFILE} ]; then
    echo "makeindex: failed to make ${INDEXFILE}"
    exit 1
fi
# remove extra spaces in dependency list -- this causes problems
# Also transform the dummy paths to their canonical locations
sed -i '' -e 's/ */ /g' -e 's/| */|/g' -e 's/ *|/|/g' -e "s,${LOCALBASE},/usr/local," ${INDEXFILE}

View File

@ -1,39 +0,0 @@
#!/bin/sh
# Generate ${builddir}/restricted.sh: a script of rm commands that
# deletes RESTRICTED distfiles and packages.  Produced by running the
# tree's clean-restricted-list target and rewriting the stock
# /usr/ports paths to this build's directories.
usage () {
    echo "usage: makerestr arch branch buildid"
    exit 1
}
if [ $# -ne 3 ]; then
    usage
fi
# configurable variables
pb=/var/portbuild
arch=$1
branch=$2
buildid=$3
# "shift 3" for consistency with makeduds/makeindex (was a bare
# "shift"; no behavior change since exactly 3 args are enforced above)
shift 3
. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv
builddir=${pb}/${arch}/${branch}/builds/${buildid}
buildenv ${pb} ${arch} ${branch} ${builddir}
# point the tree at nonexistent paths so nothing from the host leaks in
unset DISPLAY
export __MAKE_SHELL=/rescue/sh
export LOCALBASE=/nonexistentlocal
export LINUXBASE=/nonexistentlinux
export PKG_DBDIR=/nonexistentpkg
export PORT_DBDIR=/nonexistentport
cd ${PORTSDIR}
make -j4 ECHO_MSG=true clean-restricted-list \
    | sed -e "s!/usr/ports/packages/!${builddir}/packages/!g" \
          -e "s!/usr/ports/!${builddir}/ports/!g" \
    > ${builddir}/restricted.sh 2> /dev/null

View File

@ -1,87 +0,0 @@
#!/bin/sh
#
# XXX lockfile and interlock with mkbindist to avoid overlapping
# builds
#
# usage: makeworld arch branch buildid [-client] [-nocvs] [make-args]
# Updates the build's src tree, runs buildworld, and (unless -client)
# installs/distributes it into ${WORLDDIR}/${arch}/${branch}.
if [ $# -lt 3 ]; then
    echo "usage: makeworld arch branch buildid [args]"
    exit 1
fi
arch=$1
branch=$2
buildid=$3
shift 3
pb=/var/portbuild
builddir=${pb}/${arch}/${branch}/builds/${buildid}
. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/${arch}/portbuild.conf
# NB: we can't use buildenv because it sets ARCH and MACHINE_ARCH that
# confuses cross-builds
export TARGET_ARCH=${arch}
# Workaround needed for zfs - 20090321 erwin
export NO_FSCHG=1
client=0
nocvs=0
# optional arguments
# NOTE(review): unrecognized args are prepended, so they reach make in
# REVERSE command-line order -- presumably harmless for make variable
# assignments, but confirm before relying on ordering.
while [ $# -gt 0 ]; do
    case "$1" in
    -client)
        client=1
        ;;
    -nocvs)
        nocvs=1
        ;;
    *)
        args="$1 ${args}"
        ;;
    esac
    shift
done
# XXX MCL I don't know what this is supposed to do.
if [ "$client" = "1" ]; then
    SRC_BASE=${pb}/${arch}/src-client
    shift 1
else
    SRC_BASE=${builddir}/src
    export __MAKE_CONF=/dev/null
fi
cd ${SRC_BASE}
if [ "$nocvs" = "0" ]; then
    echo "==> Updating source tree"
    # per-branch CVS tag comes from server.conf (SRC_BRANCH_<n>_TAG)
    eval tag=\$SRC_BRANCH_${branch}_TAG
    cvs -Rq update -PdA -r ${tag} || exit $?
fi
echo "==> Starting make buildworld"
make buildworld ${args} || exit $?
echo "==> Cleaning up destdir"
destdir=${WORLDDIR}/${arch}/${branch}
# first rm pass, then clear schg flags and retry for protected files
rm -rf ${destdir}/
chflags -R noschg ${destdir}/
rm -rf ${destdir}/
mkdir -p ${destdir} || exit $?
echo "==> Starting make installworld"
if [ "$client" = "0" ]; then
    export NEWSPARC_TIMETYPE=__int64_t
    make installworld DESTDIR=${destdir} || exit $?
    echo "==> Starting make distribute"
    make DESTDIR=${destdir} distrib-dirs && \
        make DESTDIR=${destdir} distribution || exit $?
else
    echo "==> Not doing installworld of client source tree"
fi

View File

@ -1,83 +0,0 @@
#!/bin/sh
# XXX merge with makeworld?
# Print the synopsis on stderr-less stdout and terminate unsuccessfully.
usage () {
    echo "usage: mkbindist <arch> <branch> <buildid>"
    exit 1
}
# Remove a directory tree; if the first pass leaves it behind (schg
# flags on installed-world files), clear the flags and try again.
cleandir() {
    target=$1
    rm -rf ${target} 2>/dev/null
    [ -d ${target} ] || return 0
    chflags -R noschg ${target}
    rm -rf ${target}
}
if [ $# -lt 3 ]; then
    usage
fi
arch=$1
branch=$2
buildid=$3
shift 3
pb=/var/portbuild
. ${pb}/conf/server.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv
if ! validate_env ${arch} ${branch}; then
    echo "Invalid build environment ${arch}/${branch}"
    exit 1
fi
here=${pb}/${arch}/${branch}/builds/${buildid}
if [ ! -d ${here} ]; then
    echo "Invalid build ID ${buildid}"
    exit 1
fi
tmpdir=${here}/bindist/tmp
# Clean up ${tmpdir}
cleandir ${tmpdir}
mkdir -p ${tmpdir}
# Copy the files into the tmpdir from an existing built world,
# excluding trees the chroot must not inherit
destdir=${WORLDDIR}/${arch}/${branch}
cd ${destdir}; find -dx . | \
    grep -v -E '^./usr/(local|obj|opt|ports|src)' | \
    grep -v '^./home' | \
    grep -v '^./var/db/pkg' | \
    cpio -dump ${tmpdir}
cd ${tmpdir}
# Customize the tmpdir
# bindist/delete: per-build list of paths to strip from the image
if [ -s "${here}/bindist/delete" ]; then
    sed -e "s,^,${tmpdir}," ${here}/bindist/delete | xargs rm -rf
fi
# bindist/dirlist: extra directories to create inside the image
if [ -s "${here}/bindist/dirlist" ]; then
    cat "${here}/bindist/dirlist" | xargs mkdir -p
fi
# XXX MCL seems to be obsoleted by individual files in clients/?
if [ -d ${here}/bindist/files ]; then
    cd ${here}/bindist/files; find -dx . | cpio -dump ${tmpdir}
fi
# Post-processing of installed world
date '+%Y%m%d' > ${tmpdir}/var/db/port.mkversion
# Create the tarball (write to a dotfile, then rename so readers never
# see a partial bindist.tbz)
tar cfCj ${here}/.bindist.tbz ${tmpdir} .
mv -f ${here}/.bindist.tbz ${here}/bindist.tbz
md5 ${here}/bindist.tbz > ${here}/bindist.tbz.md5
# Clean up
cd ${here}
cleandir ${tmpdir}

View File

@ -1,10 +0,0 @@
#!/bin/sh
#
# attempted manual workaround for the "bad dependency" problem
#
# Stop squid, destroy its cache entirely, restart it, re-initialize
# the (now empty) cache directories with -z, and rotate the logs.
/usr/local/etc/rc.d/squid stop
rm -rf ~squid/cache/*
/usr/local/etc/rc.d/squid start
/usr/local/sbin/squid -z
# give the restarted squid a moment before signalling it
sleep 5
/usr/local/sbin/squid -k rotate

View File

@ -1,597 +0,0 @@
#!/usr/bin/env python
# Improved build dispatcher. Invoked on server-side from dopackages.
# We try to build leaf packages (those
# which can be built immediately without requiring additional
# dependencies to be built) in the order such that the ones required
# by the longest dependency chains are built first.
#
# This has the effect of favouring deep parts of the package tree and
# evening out the depth over time, hopefully avoiding the situation
# where the entire cluster waits for a deep part of the tree to
# build on a small number of machines
#
# We can dynamically respond to changes in build machine availability,
# since the queue manager will block jobs that cannot be immediately
# satisfied and will unblock us when a job slot becomes available.
#
# When a package build fails, it is requeued with a lower priority
# such that it will rebuild again as soon as no "phase 1" packages
# are available to build. This prevents the cluster staying idle
# until the last phase 1 package builds.
#
# Other advantages are that this system is easily customizable and in
# the future will let us customize things like the matching policy of
# jobs to machines. For example, we could avoid dispatching multiple
# openoffice builds to the same system.
#
# TODO:
# * Combine build prep stages?
# - initial check for file up-to-date
# * check mtime for package staleness (cf make)
# * option to skip phase 2
from qmanagerclient import *
from freebsd_config import *
import os, string, sys, threading, time, subprocess
#import random
from itertools import chain
#import gc
from stat import *
from Queue import Queue
from heapq import *
# Location of the cluster-wide configuration file (Python 2 code).
CONFIG_DIR="/var/portbuild"
CONFIG_SUBDIR="conf"
CONFIG_FILENAME="server.conf"
config = getConfig( CONFIG_DIR, CONFIG_SUBDIR, CONFIG_FILENAME )
# Ports named here get an artificial depth boost so they start
# building early (consumed by Port.depth_recursive).
QMANAGER_PRIORITY_PACKAGES = string.split( \
    config.get( 'QMANAGER_PRIORITY_PACKAGES' ) )

# Global registries: category name -> Category, port name -> Port.
categories = {}
ports = {}

# When a build fails we requeue it with a lower priority such that it
# will never preempt a phase 1 build but will build when spare
# capacity is available.
PHASE2_BASE_PRIO=1000
# Process success quickly so other jobs are started
SUCCESS_PRIO = -1000
# Failure should be a less common event :)
FAILURE_PRIO = -900
# Port status codes
PENDING = 1 # Yet to build
PHASE2 = 2 # Failed once
class PriorityQueue(Queue):
    """Queue subclass whose entries come out in ascending priority order.

    Entries are typically (priority, data) tuples; the smallest tuple
    is retrieved first.  The backing store is a binary heap.  This
    implementation originates in Python-2.6a3/Lib/Queue.py.
    """
    maxsize = 0

    def _init(self, maxsize):
        # Replace the default backing store with a heap-ordered list.
        self.queue = []

    def _qsize(self):
        return len(self.queue)

    def _put(self, item):
        heappush(self.queue, item)

    def _get(self):
        return heappop(self.queue)
class Index(object):
    """Parser for a ports INDEX file (legacy Python 2 code: print
    statements and the file() builtin)."""

    def __init__(self, indexfile):
        # Path of the INDEX file to parse.
        self.indexfile = indexfile

    def parse(self, targets = None):
        """Read the INDEX and populate the global ports registry.

        Phase 1 instantiates Port objects from the |-separated INDEX
        fields; phase 2 resolves the textual dependency lists of the
        selected ports into Port references via Port.setdeps.  If
        *targets* is given, only those ports get dependencies recorded.
        """
        print "[MASTER] Read index"
        f = file(self.indexfile)
        index = f.readlines()
        f.close()
        # drop references eagerly -- the INDEX is large
        f = None
        del f
        lines=[]
        print "[MASTER] Phase 1"
        for i in index:
            (name, path, prefix, comment, descr, maintainer, categories, bdep,
             rdep, www, edep, pdep, fdep) = i.rstrip().split("|")
            if targets is None or name in targets:
                lines.append((name, bdep, rdep, edep, pdep, fdep))
            # NOTE(review): indentation reconstructed -- a Port appears to
            # be created for every INDEX line (not only targets) so that
            # setdeps() below can resolve dependency names; confirm
            # against the original mdispatch.py.
            Port(name, path, "", "", "", "",
                 categories, "")
        index = None
        del index
        print "[MASTER] Phase 2"
        for (name, bdep, rdep, edep, pdep, fdep) in lines:
            ports[name].setdeps(bdep, rdep, edep, pdep, fdep)
        lines = None
        del lines
        print "[MASTER] Done"
def depthindex(targets):
    """Seed the depth tree by computing the depth of every target port."""
    for target in targets:
        target.depth_recursive()
class Port(object):
    """ A single port in the INDEX dependency graph.

    Construction registers the instance in the module-global ``ports``
    dict (keyed by package name) and in every Category named in *cats*
    (Category objects are created lazily on first reference).  The
    dependency links are filled in afterwards by setdeps().
    """
    def __init__(self, name, path, prefix, comment, descr, maintainer,
                 cats, www):
        # NOTE(review): this creates a *local* variable, not a class-level
        # __slots__ declaration, so it has no slotting effect.  It is also
        # incomplete (id/status/done are missing), so enabling it for real
        # would break __init__.  Left untouched.
        __slots__ = ["name", "path", "prefix", "comment", "descr",
                     "maintainer", "www", "bdep", "rdep", "edep", "pdep",
                     "fdep", "alldep", "parents", "depth", "categories"]
        self.name = name                  # package name, e.g. "foo-1.0"
        self.path = path                  # port origin directory
        self.prefix = prefix
        self.comment = comment
        self.descr = descr
        self.maintainer = maintainer
        self.www = www
        # Dependency lists (lists of Port references); populated later by
        # setdeps().
        self.bdep = []                    # build dependencies
        self.rdep = []                    # run dependencies
        self.edep = []                    # extract dependencies
        self.pdep = []                    # patch dependencies
        self.fdep = []                    # fetch dependencies
        self.alldep = []                  # deduplicated union of the above
        self.parents = []                 # ports that depend on this one
        self.id = None # XXX              # qmgr job id while dispatched
        self.status = PENDING
        # Whether the package build has completed and is hanging around
        # to resolve dependencies for others XXX use status
        self.done = False
        # Depth is the maximum length of the dependency chain of this port
        self.depth = None
        self.categories = []
        scats = cats.split()
        if len(scats) != len(set(scats)):
            print "[MASTER] Warning: port %s includes duplicated categories: %s" % (name, cats)
        # Register with each (unique) category, creating Category objects
        # on first reference.
        for c in set(scats):
            try:
                cat = categories[c]
            except KeyError:
                cat = Category(c)
            self.categories.append(cat)
            cat.add(self)
        ports[name] = self

    def remove(self):
        """ Clean ourselves up but don't touch references in other objects;
        they still need to know about us as dependencies etc """
        self.fdep = None
        self.edep = None
        self.pdep = None
        self.bdep = None
        self.rdep = None
        self.alldep = None
        self.parents = None
        for cat in self.categories:
            cat.remove(self)
        ports[self.name] = None
        del ports[self.name]
        # NB: only unbinds the local name; the object stays alive while
        # other ports still hold references to it.
        del self

    def destroy(self):
        """ Remove a package and all references to it """
        # Drop back-references from everything we depend on.
        for pkg in self.alldep:
            if pkg.parents is not None:
                # Already removed but not destroyed
                try:
                    pkg.parents.remove(self)
                except ValueError:
                    continue
        # Drop forward references from everything depending on us; a port
        # appears in only some of the typed lists, hence the pass-through
        # ValueError handlers.
        for pkg in self.parents:
            try:
                pkg.fdep.remove(self)
            except ValueError:
                pass
            try:
                pkg.edep.remove(self)
            except ValueError:
                pass
            try:
                pkg.pdep.remove(self)
            except ValueError:
                pass
            try:
                pkg.bdep.remove(self)
            except ValueError:
                pass
            try:
                pkg.rdep.remove(self)
            except ValueError:
                pass
            pkg.alldep.remove(self)
        # Python 2 idiom: discard the swallowed-exception state.
        sys.exc_clear()
        self.remove()

    def setdeps(self, bdep, rdep, edep, pdep, fdep):
        """ Resolve whitespace-separated dependency name strings into Port
        references and link the reverse (parent) edges. """
        self.fdep = [ports[p] for p in fdep.split()]
        self.edep = [ports[p] for p in edep.split()]
        self.pdep = [ports[p] for p in pdep.split()]
        self.bdep = [ports[p] for p in bdep.split()]
        self.rdep = [ports[p] for p in rdep.split()]
        self.alldep = list(set(chain(self.fdep, self.edep, self.pdep,
                                     self.bdep, self.rdep)))
        for p in self.alldep:
            p.parents.append(self)

    def depth_recursive(self):
        """
        Recursively populate the depth tree up from a given package
        through dependencies, assuming empty values on entries not yet
        visited
        """
        if self.depth is None:
            if len(self.parents) > 0:
                # depth = 1 + deepest port that depends on us.
                # (NB: "max" shadows the builtin within this scope.)
                max = 0
                for i in self.parents:
                    w = i.depth_recursive()
                    if w > max:
                        max = w
                self.depth = max + 1
            else:
                self.depth = 1
            for port in QMANAGER_PRIORITY_PACKAGES:
                if self.name.startswith(port):
                    # Artificial boost to try and get it building earlier
                    self.depth = 100
        return self.depth

    def destroy_recursive(self):
        """ Remove a port and everything that depends on it """
        parents = set([self])
        while len(parents) > 0:
            pkg = parents.pop()
            assert pkg.depth is not None
            parents.update(pkg.parents)
            pkg.destroy()

    def success(self):
        """ Build succeeded and possibly uncovered some new leaves """
        parents = self.parents[:]
        self.done = True
        self.remove()
        # Ports whose dependencies are now all complete become buildable.
        newleafs = [p for p in parents if all(c.done for c in p.alldep)]
        return newleafs

    def failure(self):
        """ Build failed """
        self.destroy_recursive()

    def packagename(self, arch, branch, buildid):
        """ Return the path where a package may be found"""
        return "/var/portbuild/%s/%s/builds/%s/packages/All/%s.tbz" \
            % (arch, branch, buildid, self.name)

    def is_stale(self, arch, branch, buildid):
        """ Does a package need to be (re)-built?
        Returns: False: if it exists and has newer mtime than all of
        its dependencies.
        True: otherwise
        """
        my_pkgname = self.packagename(arch, branch, buildid)
        pkg_exists = os.path.exists(my_pkgname)
        if pkg_exists:
            my_mtime = os.stat(my_pkgname)[ST_MTIME]
        dep_packages = [pkg.packagename(arch, branch, buildid)
                        for pkg in self.alldep]
        deps_exist = all(os.path.exists(pkg) for pkg in dep_packages)
        # Short-circuiting keeps my_mtime from being referenced when the
        # package itself is missing (result is then True: must build).
        return not (pkg_exists and deps_exist and
                    all(os.stat(pkg)[ST_MTIME] <= my_mtime
                        for pkg in dep_packages))
class Category(object):
    """ A named ports category.

    Keeps an identity-keyed collection of member ports and registers
    itself in the module-global ``categories`` map on construction.
    """
    def __init__(self, name):
        self.name = name
        self.ports = {}
        categories[name] = self

    def add(self, port):
        # The dict is used as an identity set: each port maps to itself.
        self.ports[port] = port

    def remove(self, port):
        del self.ports[port]
def gettargets(targets):
""" split command line arguments into list of packages to build.
Returns set or iterable of all ports that will be built including
dependencies """
plist = set()
if len(targets) == 0:
targets = ["all"]
for i in targets:
if i == "all":
return ports.itervalues()
if i.endswith("-all"):
cat = i.rpartition("-")[0]
plist.update(p.name for p in categories[cat].ports)
elif i.rstrip(".tbz") in ports:
plist.update([ports[i.rstrip(".tbz")].name])
else:
raise KeyError, i
# Compute transitive closure of all dependencies
pleft=plist.copy()
while len(pleft) > 0:
pkg = pleft.pop()
new = [p.name for p in ports[pkg].alldep]
plist.update(new)
pleft.update(new)
for p in set(ports.keys()).difference(plist):
ports[p].destroy()
return [ports[p] for p in plist]
class worker(threading.Thread):
    """ One thread per dispatched package build.

    Runs pdispatch for self.job on self.machine, relays the build output
    into the master log, releases the qmgr slot, and posts the outcome
    back on the master's priority queue:
      (-depth, pkg)        retcode 254 (soft failure) / 253 (node being
                           set up) -- requeue
      (SUCCESS_PRIO, pkg)  retcode 0
      (FAILURE_PRIO, pkg)  anything else
    """
    # Protects threads
    lock = threading.Lock()
    # Running threads, used for collecting status
    threads = {}

    def __init__(self, mach, job, arch, branch, buildid, queue):
        threading.Thread.__init__(self)
        self.machine = mach     # build node assigned by qmgr
        self.job = job          # Port being built
        self.arch = arch
        self.branch = branch
        self.buildid = buildid
        self.queue = queue      # master's result queue
        self.setDaemon(True)

    def run(self):
        pkg = self.job
        # Trailing comma keeps the cursor on the line for the phase tag.
        print "[MASTER] Running job %s" % (pkg.name),
        if pkg.status == PHASE2:
            print " (phase 2)"
        else:
            print
        try:
            # Dependency package lists are handed to pdispatch through the
            # environment, one variable per dependency class.
            runenv = {'HOME':"/root",
                      'PATH':'/sbin:/bin:/usr/sbin:/usr/bin:/usr/games:/usr/local/sbin:/usr/local/bin:/var/portbuild/scripts',
                      'FD':" ".join(["%s.tbz" % p.name for p in pkg.fdep]),
                      'ED':" ".join(["%s.tbz" % p.name for p in pkg.edep]),
                      'PD':" ".join(["%s.tbz" % p.name for p in pkg.pdep]),
                      'BD':" ".join(["%s.tbz" % p.name for p in pkg.bdep]),
                      'RD':" ".join(["%s.tbz" % p.name for p in pkg.rdep])}
            # Forward selected build-tweaking knobs from our own environment.
            for var in ["NOCLEAN", "NO_RESTRICTED", "NOPLISTCHECK", "NO_DISTFILES", "FETCH_ORIGINAL", "TRYBROKEN" ]:
                if var in os.environ:
                    runenv[var] = os.environ.get(var)
            build = subprocess.Popen(
                ["/bin/sh", "/var/portbuild/scripts/pdispatch",
                 self.arch, self.branch, self.buildid, self.machine,
                 "/var/portbuild/scripts/portbuild", "%s.tbz" % pkg.name,
                 pkg.path],
                env=runenv,
                stderr=subprocess.STDOUT, stdout=subprocess.PIPE, bufsize=0)
        except OSError, e:
            # NOTE(review): "build" is left unbound here, so the read loop
            # below raises NameError, which its bare except then masks.
            print >>sys.stderr, "[%s:%s]: Execution failed: %s" % \
                (pkg.id, pkg.name, e)
        # Relay the build's combined stdout/stderr to the master log.
        while True:
            try:
                line = build.stdout.readline()
            except:
                print "[%s:%s]: Failed reading from build script" % \
                    (pkg.id, pkg.name)
                break
            if line == "":
                break
            print "[%s:%s] %s" % (pkg.id, pkg.name, line.rstrip())
        retcode = build.wait()
        # time.sleep(random.randint(0,60))
        #
        # r = random.random()
        # if r < 0.1:
        #     retcode = 1
        # elif r < 0.15:
        #     retcode = 254
        # else:
        #     retcode = 0

        # Give the build slot back to the queue manager.
        conn = QManagerClientConn(stderr = sys.stderr)
        timeout = 1
        try:
            (code, vars) = conn.command("release", {'id':pkg.id})
        except RequestError, e:
            print "[MASTER] Error releasing job %s (%s): %s" % (pkg.name, pkg.id, e.value)

        if retcode == 254:
            # Requeue soft failure at original priority
            # XXX exponential backoff?
            time.sleep(60)
            # print "Requeueing %s" % pkg.id
            self.queue.put((-pkg.depth, pkg))
        elif retcode == 253:
            # setting up a machine, we should immediately retry
            self.queue.put((-pkg.depth, pkg))
        elif retcode == 0:
            self.queue.put((SUCCESS_PRIO, pkg))
        else:
            self.queue.put((FAILURE_PRIO, pkg))

        # Clean up: deregister this thread from the status table.
        worker.lock.acquire()
        worker.threads[self] = None
        del worker.threads[self]
        worker.lock.release()

    @staticmethod
    def dispatch(mach, job, arch, branch, buildid, queue):
        """ Create, register and start a worker thread for one job. """
        wrk = worker(mach, job, arch, branch, buildid, queue)
        worker.lock.acquire()
        worker.threads[wrk] = wrk
        worker.lock.release()
        wrk.start()
def main(arch, branch, buildid, args):
global index
basedir="/var/portbuild/"+arch+"/"+branch+"/builds/"+buildid
portsdir=basedir+"/ports"
indexfile=portsdir+"/INDEX-"+branch
print "[MASTER] parseindex..."
index = Index(indexfile)
index.parse()
print "[MASTER] length = %s" % len(ports)
print "[MASTER] Finding targets..."
targets = gettargets(args)
print "[MASTER] Calculating depth..."
depthindex(targets)
print "[MASTER] Pruning duds..."
dudsfile=basedir+"/duds"
for line in file(dudsfile):
try:
dud = ports[line.rstrip()]
except KeyError:
continue
print "[MASTER] Skipping %s (duds)" % dud.name
dud.destroy_recursive()
queue = PriorityQueue()
# XXX can do this while parsing index if we prune targets/duds
# first
for pkg in ports.itervalues():
if len(pkg.alldep) == 0:
queue.put((-pkg.depth, pkg))
# XXX check osversion, pool
mdl=["arch = %s" % arch]
# Main work loop
while len(ports) > 0:
print "[MASTER] Ports remaining=%s, Queue length=%s" % (len(ports), queue.qsize())
if len(ports) < 10:
print "[MASTER] Remaining ports: %s" % ports.keys()
(prio, job) = queue.get()
if prio == SUCCESS_PRIO:
print "[MASTER] Job %s succeeded" % job.name
for new in job.success():
queue.put((-new.depth, new))
continue
elif prio == FAILURE_PRIO:
if job.status == PHASE2:
print "[MASTER] Job %s failed" % job.name
job.failure()
continue
else:
# Requeue at low priority
print "[MASTER] Job %s failed (requeued for phase 2)" % job.name
job.status = PHASE2
queue.put((PHASE2_BASE_PRIO-job.depth, job))
continue
elif job.status == PHASE2:
depth = -(prio - PHASE2_BASE_PRIO)
else:
depth = -prio
print "[MASTER] Working on job %s, depth %d" % (job.name, depth)
if job.is_stale(arch, branch, buildid):
conn = QManagerClientConn(stderr = sys.stderr)
(code, vars) = conn.command("acquire",
{"name":job.name,
"type":"%s/%s/%s package" % \
(arch, branch, buildid),
"priority":10, "mdl":mdl})
if code[0] == "2":
machine=vars['machine']
job.id=vars['id']
# print "Got ID %s" % job.id
worker.dispatch(machine, job, arch, branch, buildid, queue)
else:
print "[MASTER] Error acquiring job %s: %s" % (pkg.name, code)
else:
print "[MASTER] Skipping %s since it already exists" % job.name
for new in job.success():
queue.put((-new.depth, new))
print "[MASTER] Waiting for threads"
threads = worker.threads.copy()
for t in threads:
print "[MASTER] Outstanding thread: %s" % t.job.name
for t in threads:
print "[MASTER] Waiting for thread %s" % t.job.name
t.join()
print "[MASTER] Finished"
if __name__ == "__main__":
    # Usage: <prog> <arch> <branch> <buildid> [target ...]
    # from guppy import hpy; h = hpy()
    main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4:])

View File

@ -1,203 +0,0 @@
#!/bin/sh
# $FreeBSD$
#
# pdispatch <arch> <branch> <buildid> <host> <command> <package.tbz> [<args> ...]
#
# server-side script to dispatch the job to a host via the ptimeout script.
#
# Exit status:
#   253  build node is being set up; caller should retry immediately
#   254  soft failure (no free chroot etc.); caller should retry later
#   255  build ended uncleanly
#   otherwise the exit status of the remote build command.

pb=/var/portbuild

arch=$1
branch=$2
buildid=$3
host=$4
command=$5
shift 5

pbab=${pb}/${arch}/${branch}

. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv

timeout=${PDISPATCH_TIMEOUT}
loglength=${PDISPATCH_LOGLENGTH}
hdrlength=${PDISPATCH_HDRLENGTH}

# Resolve e.g. "latest" to a concrete build ID.
# BUGFIX: remember the requested ID so the error message is not empty
# when resolution fails (resolve clobbers ${buildid} with "").
requested_buildid=${buildid}
buildid=$(resolve ${pb} ${arch} ${branch} ${buildid})
if [ -z "${buildid}" ]; then
    echo "Invalid build ID ${requested_buildid}"
    exit 1
fi

builddir=${pbab}/builds/${buildid}

buildenv ${pb} ${arch} ${branch} ${builddir}

# XXX needed still?
unset DISPLAY

# Allow override by HPN-SSH for performance
if [ -z "${ssh_cmd}" ]; then
    ssh_cmd=ssh
fi
if [ -z "${scp_cmd}" ]; then
    scp_cmd=scp
fi

pkgname=$(basename $1 ${PKGSUFFIX})
if [ -z "${pkgname}" ]; then
    echo "null packagename"
    exit 1
fi
args=${1+"$@"}

# Translate environment knobs into portbuild flags.
flags=""
clean=1
if [ "x$NOCLEAN" != "x" ]; then
    flags="${flags} -noclean"
    clean=0
fi
if [ "x$NO_RESTRICTED" != "x" ]; then
    flags="${flags} -norestr"
fi
if [ "x$NOPLISTCHECK" != "x" ]; then
    flags="${flags} -noplistcheck"
fi
if [ "x$NO_DISTFILES" = "x" ]; then
    flags="${flags} -distfiles"
fi
if [ "x$FETCH_ORIGINAL" != "x" ]; then
    flags="${flags} -fetch-original"
fi
if [ "x$TRYBROKEN" != "x" ]; then
    flags="${flags} -trybroken"
fi

chroot=
. ${pb}/${arch}/portbuild.conf
test -f ${pb}/${arch}/portbuild.${host} && . ${pb}/${arch}/portbuild.${host}

# Ask the node for a free build chroot.
chrootdata=$(${ssh_cmd} -a -n ${client_user}@${host} ${sudo_cmd} ${pb}/scripts/claim-chroot ${arch} ${branch} ${buildid} ${pkgname} 2>&1)
if [ -z "${chrootdata}" ]; then
    echo "Failed to claim chroot on ${host}"
    exit 254
fi
case "${chrootdata}" in
*/var/portbuild/scripts/claim-chroot*)
    # Error executing script, assume system is booting
    chrootdata="wait boot"
    ;;
esac

# echo "Got ${chrootdata} from ${host}"
set -- ${chrootdata}
if [ $# -ge 2 ]; then
    case $1 in
    chroot)
        chroot=$2
        ;;
    setup)
        echo "Setting up ${arch}/${branch} build ID ${buildid} on ${host}"
        # Run in the background so we can potentially
        # claim a slot on another machine. In
        # practise I think we often end up trying
        # again on the same machine though.
        # Make sure to close stdin/stderr in the child
        # or make will hang until the child process
        # exits
        ${pb}/scripts/dosetupnode ${arch} ${branch} ${buildid} ${host} > /tmp/setupnode.$$ 2>&1 &
        exit 253
        ;;
    error)
        echo "Error reported by ${host}: $2"
        ;;
    wait)
        echo "Waiting for setup of ${host} to finish"
        ;;
    esac
    shift 2
fi

# No chroot granted: soft failure, retry later.
if [ -z "${chroot}" ]; then
    exit 254
fi

. ${pb}/${arch}/portbuild.conf
test -f ${pb}/${arch}/portbuild.${host} && . ${pb}/${arch}/portbuild.${host}

rm -f ${builddir}/logs/${pkgname}.log ${builddir}/logs/${pkgname}.log.bz2
rm -f ${builddir}/errors/${pkgname}.log ${builddir}/errors/${pkgname}.log.bz2

# Run the remote build under ptimeout so a hung node cannot wedge us.
${pb}/scripts/ptimeout.host $timeout ${ssh_cmd} -a -n ${client_user}@${host} ${sudo_cmd} ${command} ${arch} ${branch} ${buildid} ${chroot} ${flags} \"$ED\" \"$PD\" \"$FD\" \"$BD\" \"$RD\" ${args} 2>&1
error=$?

# Pull in the results of the build from the client
${scp_cmd} ${client_user}@${host}:${chroot}/tmp/${pkgname}.log ${builddir}/logs/${pkgname}.log
(${ssh_cmd} -a -n ${client_user}@${host} test -f ${chroot}/tmp/work.tbz ) && ${scp_cmd} ${client_user}@${host}:${chroot}/tmp/work.tbz ${builddir}/wrkdirs/${pkgname}.tbz

# XXX Set dirty flag if any of the scp's fail
mkdir -p ${builddir}/distfiles/.pbtmp/${pkgname}
${ssh_cmd} -a -n ${client_user}@${host} tar -C ${chroot}/tmp/distfiles --exclude ${chroot}/tmp/distfiles/RESTRICTED -cf - . | \
    tar --unlink -C ${builddir}/distfiles/.pbtmp/${pkgname} -xvf - && \
    touch ${builddir}/distfiles/.pbtmp/${pkgname}/.done

if [ "${error}" = 0 ]; then
    # Success: import the freshly built package(s) from the node.
    ${ssh_cmd} -a -n ${client_user}@${host} tar -C ${chroot}/tmp -cf - packages | \
        tar --unlink -C ${builddir} -xvf -
    # XXX why is this needed?
    test -f ${builddir}/packages/All/${pkgname}${PKGSUFFIX} && \
        touch ${builddir}/packages/All/${pkgname}${PKGSUFFIX}
    if [ -f ${builddir}/errors/${pkgname}.log ]; then
        rm -f ${builddir}/errors/${pkgname}.log
        # Force rebuild of html page to remove this package from list
        touch ${builddir}/errors/.force
    fi
    lockf -k ${pbab}/failure.lock ${pb}/scripts/buildsuccess ${arch} ${branch} ${buildid} ${pkgname}
    log=${builddir}/logs/$pkgname.log
    if grep -q "even though it is marked BROKEN" ${log}; then
        echo | mail -s "${pkgname} BROKEN but built on ${arch} ${branch}" ${mailto}
    fi
    if grep -q "^list of .*file" ${log}; then
        buildlogdir=$(realpath ${builddir}/logs/)
        baselogdir=$(basename ${buildlogdir})
        (sed -e '/^build started/,$d' $log;echo;echo "For the full build log, see"; echo; echo " http://${MASTER_URL}/errorlogs/${arch}-errorlogs/${baselogdir}/$(basename $log)";echo;sed -e '1,/^=== Checking filesystem state/d' $log) | mail -s "${pkgname} pkg-plist errors on ${arch} ${branch}" ${mailto}
    fi
else
    # Failure: collect the error log and mail a report.
    log=${builddir}/errors/${pkgname}.log
    ${scp_cmd} ${client_user}@${host}:${chroot}/tmp/${pkgname}.log ${log}
    result=$?
    if [ $result -ne 0 ]; then
        (echo ${chroot}@${host}; ${ssh_cmd} -a -n ${client_user}@${host} ls -laR ${chroot}/tmp) | mail -s "${pkgname} logfile not found" ${mailto}
    else
        if ! grep -q "even though it is marked BROKEN" ${log}; then
            buildlogdir=$(realpath ${builddir}/logs/)
            baselogdir=$(basename ${buildlogdir})
            if [ $(wc -l ${log} | awk '{print $1}') -le $((loglength + hdrlength)) ]; then
                (echo "You can also find this build log at"; echo; echo " http://${MASTER_URL}/errorlogs/${arch}-errorlogs/${baselogdir}/$(basename $log)";echo;cat ${log}) | mail -s "${pkgname} failed on ${arch} ${branch}" ${mailto}
            else
                (echo "Excerpt from the build log at"; echo; echo " http://${MASTER_URL}/errorlogs/${arch}-errorlogs/${baselogdir}/$(basename $log)";echo;sed -e '/^build started/,$d' $log;echo;echo " [... lines trimmed ...]";echo;tail -${loglength} ${log}) | mail -s "${pkgname} failed on ${arch} ${branch}" ${mailto}
            fi
        fi
        lockf -k ${pbab}/failure.lock ${pb}/scripts/buildfailure ${arch} ${branch} ${buildid} ${pkgname}
    fi
fi

# Release the chroot on the node.
${ssh_cmd} -a -n ${client_user}@${host} ${sudo_cmd} ${pb}/scripts/clean-chroot ${arch} ${branch} ${buildid} ${chroot} ${clean}

# XXX Set a dirty variable earlier and check here
if grep -q "^build of .*ended at" ${builddir}/logs/${pkgname}.log; then
    exit ${error}
else
    echo "Build of ${pkgname} in ${host}:/${chroot} failed uncleanly"
    exit 255
fi

View File

@ -1,117 +0,0 @@
/* pnohang: executes command ($4-) with output in file ($3)
* kills command if no output with $1 seconds with message in $2
* usage: pnohang timeout file command args ...
*/
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
/*
 * Run "command args..." with stdout/stderr redirected to outfile; a
 * watchdog child kills the command (and the parent) if outfile has not
 * been modified within `timeout' seconds.
 */
int
main(int argc, char *argv[])
{
	int timeout, status, i, result, ofd;
	char *command, *outfile, *message, args[MAXPATHLEN + 1];
	pid_t pid, pid1, pid2, child;
	time_t now;
	struct stat st;
	struct sigaction sv;

	/*
	 * BUGFIX: argv[1]..argv[4] are all dereferenced below, so four
	 * arguments are mandatory; the old "argc < 3" check allowed a
	 * NULL command to reach execvp().
	 */
	if (argc < 5) {
		printf("usage: %s timeout outfile message command [args...]\n",
		    argv[0]);
		exit(1);
	}

	timeout = atoi(argv[1]);	/* seconds of output silence allowed */
	outfile = argv[2];		/* file receiving command output */
	message = argv[3];		/* tag used in the kill message */
	command = argv[4];

	/* Flatten the command and its arguments for log messages. */
	bzero(args, MAXPATHLEN + 1);
	for (i = 4; i < argc; i++) {
		/*
		 * BUGFIX: strlcat()'s third argument is the *total* buffer
		 * size; passing MAXPATHLEN - strlen(args) progressively
		 * over-truncated the string.
		 */
		strlcat(args, argv[i], sizeof(args));
		strlcat(args, " ", sizeof(args));
	}

	pid = getpid();

	/* Send all further output (ours and the command's) to outfile. */
	if ((ofd = open(outfile, O_CREAT|O_TRUNC|O_WRONLY, 0600)) == -1)
		err(1, "open");
	if (dup2(ofd, STDOUT_FILENO) == -1)
		err(1, "dup2 stdout");
	if (dup2(ofd, STDERR_FILENO) == -1)
		err(1, "dup2 stderr");

	if ((pid1 = fork()) > 0) {
		if ((pid2 = fork()) > 0) {
			/*
			 * Parent: ignore SIGTERM so the watchdog's kill of
			 * the process group cannot take us down first.
			 */
			sv.sa_handler = SIG_IGN;
			sigemptyset(&sv.sa_mask);
			sv.sa_flags = 0;
			sigaction(SIGTERM, &sv, 0);

			/* Wait for whichever child exits first. */
			child = wait(&status);
			/*
			 * BUGFIX: this comparison was written "pid1 = child"
			 * (assignment), which is always true, so the
			 * watchdog branch below was unreachable.
			 */
			if (pid1 == child) {
				/* Command finished: retire the watchdog. */
				kill(pid2, SIGTERM);
			} else {
				/* Watchdog fired: kill the command. */
				kill(pid1, SIGTERM);
			}
			/*
			 * exit status in upper 8 bits, killed signal (if
			 * any) in lower 8 bits
			 */
			exit((status >> 8) | (status & 0xff));
		} else {
			/* Second child: watchdog on outfile's mtime. */
			for (;;) {
				sleep(timeout/10);
				now = time(NULL);
				stat(outfile, &st);
				if ((now - st.st_mtime) > timeout) {
					printf("%s: killing %s (%s, pid %d and %d) since no output in %d seconds since %s", argv[0], args, message, pid1, pid, timeout, ctime(&now));
					printf("ps jgx before the signal\n");
					system("ps jgxww");
					sleep(1); /* give it a chance to output the message */
					kill(pid1, SIGTERM);
					sleep(1);
					kill(pid, SIGTERM);
					sleep(1);
					system("ps jgxww");
					exit(1);
				}
			}
		}
	} else {
		/* First child: run the monitored command. */
		result = execvp(command, argv + 4);
		if (result < 0) {
			printf("Failed to exec %s: %s\n", args, strerror(errno));
			exit(1);
		}
	}
	return 0;
}

View File

@ -1,301 +0,0 @@
#!/usr/bin/env python
#
# pollmachine
#
# Monitors build machines and notifies qmgr of changes
#
# pollmachine [options] [arch] ...
# - update every machine in the mlist file for [arch]
#
# pollmachine [options] [arch/mach] ...
# - update individual machine(s) for specified architecture
#
# options are:
# -daemon : poll repeatedly
#
# TODO:
# XXX qmgr notification of new/removed machines
# XXX counter before declaring a machine as dead
# Declares a machine as online if it reports 0 data from infoseek?
# * Deal with machines change OS/kernel version
# - ACL list might change!
# - take machine offline, update ACL/arch/etc, reboot, bring online
import sys, threading, socket
from time import sleep
import os, subprocess, logging
if len(sys.argv) < 1:
print "Usage: %s <arch> [<arch> ...]" % sys.argv[0]
sys.exit(1)
arches=set()
mlist={}
polldelay=0
for i in sys.argv[1:]:
if i == "-daemon":
polldelay = 180
continue
if "/" in i:
item=i.partition("/")
arch=item[0]
mach=item[2]
arches.add(arch)
try:
mlist[arch].add(mach)
except KeyError:
mlist[arch] = set((mach,))
else:
arches.add(i)
pb="/var/portbuild"
# set of machines for each arch
machines={}
for i in arches:
machines[i]=set()
# Mapping from machine names to monitor threads
pollthreads={}
class MachinePoll(threading.Thread):
    """ Poll a machine regularly """
    mach = None       # Which machine name to poll
    arch = None       # Which arch is this assigned to
    # Which host/port to poll for this machine status (might be SSH
    # tunnel endpoint)
    host = None
    port = 414
    timeout = None    # How often to poll
    shutdown = False  # Exit at next poll wakeup

    # State variables tracked
    online = False

    # Dictionary of variables reported by the client
    vars = None

    def __init__(self, mach, arch, timeout, host, port):
        super(MachinePoll, self).__init__()
        self.mach = mach
        self.arch = arch
        self.timeout = timeout
        self.host = host
        self.port = port
        # How many times the connection timed out since last success
        self.timeouts = 0
        self.vars = {}
        self.setDaemon(True)

    def run(self):
        # Poll until asked to shut down; a zero/None timeout means
        # poll exactly once.
        while True:
            if self.shutdown:
                break
            self.poll()
            if not self.timeout:
                break
            else:
                sleep(self.timeout)

    def poll(self):
        """ Poll the status of this machine """
        nowonline = False
        lines = []
        try:
            # Connect to the node's status service and read its
            # key=value report (capped at 64 KiB).
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.settimeout(60)
            s.connect((self.host, self.port))
            data = ""
            while len(data) < 65536:
                chunk = s.recv(8192)
                if not chunk:
                    break
                data += chunk
            nowonline = True
            self.timeouts = 0
            lines = data.split("\n")
        except socket.timeout:
            if self.online:
                logging.info("[%s] Connection timeout" % self.mach)
            # Tolerate up to two consecutive timeouts before declaring
            # the machine offline.
            self.timeouts += 1
            if self.timeouts < 3:
                nowonline = self.online
        except:
            pass
        finally:
            try:
                s.close()
            except:
                pass

        if nowonline != self.online:
            logging.info("[%s] Now %s" % (self.mach, "online" if nowonline else "OFFLINE"))
            self.online = nowonline
            if self.online:
                self.timeouts = 0
            # XXX inform qmgr of state change

        if self.online and not lines and not self.timeouts:
            # reportload script is missing
            dosetup = 1
        else:
            dosetup = 0

        # Parse the key=value report and record changed values.
        for line in lines:
            if line == "":
                continue
            line = line.rstrip()
            part = line.partition('=')
            if part[1] != '=' or not part[0]:
#                if "No such file or directory" in line:
#                    # Client may require setting up post-boot
#                    dosetup=1
                logging.info("[%s] Bad input: %s" % (self.mach, line))
                # Assume client needs setting up
                dosetup = 1
            try:
                old = self.vars[part[0]]
            except KeyError:
                old = ""
            if old != part[2]:
                self.vars[part[0]] = part[2]
#                logging.info("%s@%s: \"%s\" -> \"%s\"" % (part[0], self.mach, old, part[2]))
                # XXX update qmgr

        try:
            # Tear down build environments whose build is no longer
            # active on the master.
            envs = self.vars['buildenvs']
            for e in envs.split():
                (arch, branch, buildid) = e.split("/")
                f = "/var/portbuild/%s/%s/builds/%s/.active" % \
                    (arch, branch, buildid)
                if os.path.exists(f):
                    continue
                # Clean up a stale buildenv
                logging.info("[%s] Cleaning up stale build: %s" % (self.mach, e))
                (err, out) = self.setup(branch, buildid, "-nocopy -full")
                if err:
                    logging.info("[%s] Error from cleanup" % (self.mach))
                    for l in out.split("\n"):
                        if l == "":
                            continue
                        logging.info("[%s] %s" % (self.mach, l))
        except KeyError:
            pass

        if dosetup:
            logging.info("[%s] Setting up machine" % (self.mach))
            (err, out) = self.setup("-", "-")
            if err:
                logging.info("[%s] Error from setup" % (self.mach))
                for l in out.split("\n"):
                    if l == "":
                        continue
                    logging.info("[%s] %s" % (self.mach, l))
            logging.info("[%s] Setup complete" % (self.mach))

        # Validate that arch has not changed (e.g. i386 -> amd64)
        try:
            if self.arch != self.vars['arch']:
                logging.info("[%s] Unexpected arch: %s -> %s" % \
                             (self.mach, self.arch, self.vars['arch']))
        except KeyError:
            pass

        # Record current system load
        try:
            f = file("%s/%s/loads/%s" % (pb, self.arch, self.mach), "w")
        except:
            return
        try:
            f.write("%s %s\n" % (self.vars['jobs'], self.vars['load']))
        except:
            pass
        f.close()

    def setup(self, branch, buildid, args = ""):
        """ Run dosetupnode for this machine as the per-arch build user;
        returns (exit status, combined output). """
        cmd = "su ports-%s -c \"/var/portbuild/scripts/dosetupnode %s %s %s %s %s\""\
            % (self.arch, self.arch, branch, buildid, self.mach, args)
        child = subprocess.Popen(cmd, shell=True, stderr = subprocess.STDOUT,
                                 stdout = subprocess.PIPE)
        err = child.wait()
        out = "".join(child.stdout.readlines())
        return (err, out)
# Log to /var/log/pollmachine.log and mirror everything to the console.
logging.basicConfig(level=logging.INFO,
                    format='[%(asctime)s] %(message)s',
                    datefmt='%d %b %Y %H:%M:%S',
                    filename='/var/log/pollmachine.log', filemode='w')
log_console = logging.StreamHandler()
log_console.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s] %(message)s',
                              datefmt = '%d %b %Y %H:%M:%S')
log_console.setFormatter(formatter)
logging.getLogger('').addHandler(log_console)

# Main loop: reconcile the set of poll threads with the configured
# machine lists, then sleep (daemon mode) or exit (one-shot mode).
while True:
    for arch in arches:
        # Machines come either from the command line (mlist) or from
        # the per-arch mlist file.
        try:
            now = mlist[arch]
        except KeyError:
            mlistfile = "%s/%s/mlist" % (pb, arch)
            try:
                f = file(mlistfile, "r")
            except OSError, error:
                raise
            now = set(mach.rstrip() for mach in f.readlines())
            f.close()

        gone = machines[arch].difference(now)
        new = now.difference(machines[arch])
        machines[arch] = now

        for mach in gone:
            logging.info("Removing machine %s/%s" % (arch, mach))
            # XXX disable from qmgr
            # Ask the monitor thread to exit at its next wakeup.
            pollthreads[mach].shutdown = True
            del pollthreads[mach]

        for mach in new:
            logging.info("Adding machine %s/%s" % (arch, mach))
            # XXX set up qmgr
            # Per-arch/per-machine config may redirect polling to an SSH
            # tunnel endpoint via infoseek_host/infoseek_port.
            pc = "%s/%s/portbuild.conf" % (pb, arch)
            pch = "%s/%s/portbuild.%s" % (pb, arch, mach)
            cmd = "test -f %s && . %s; test -f %s && . %s; echo $infoseek_host; echo $infoseek_port" % (pc, pc, pch, pch)
            config = subprocess.Popen(cmd, shell = True,
                                      stdout = subprocess.PIPE)
            host = config.stdout.readline().rstrip()
            if not host:
                host = mach
            port = config.stdout.readline().rstrip()
            try:
                port = int(port)
            except (TypeError, ValueError):
                port = 414
            pollthreads[mach] = MachinePoll(mach, arch, polldelay, host, port)
            pollthreads[mach].start()

    if not polldelay:
        # One-shot mode: exit after a single pass.
        break
    sleep(polldelay)

View File

@ -1,337 +0,0 @@
#!/bin/sh
# $FreeBSD$
# client-side script to do all the work surrounding an individual package
# build, and then the package build itself
# note: unredirected 'echo' output goes to the journal file
# usage: $0 ARCH BRANCH BUILDID CHROOT [-noclean] [-norestr] [-noplistcheck] [-distfiles] [-fetch-original] [-trybroken] PKGNAME.tgz DIRNAME [DEPENDENCY.tgz ...]

# Root of the portbuild installation on the client.
pb=/var/portbuild
# mount_fs <fs> <mntpt> <master>
# Make a filesystem visible inside the build area: a read-only nullfs
# loopback when this node is marked disconnected, otherwise a read-only
# NFSv3 mount from the given master host.
mount_fs()
{
	fs=$1
	mntpt=$2
	master=$3
	case "${disconnected}" in
	1)
		mount -t nullfs -r ${fs} ${mntpt}
		;;
	*)
		mount_nfs -o ro -3 -i ${master}:${fs} ${mntpt}
		;;
	esac
}
# copypkg <pb> <host> <pkgfile> <dest> <http_proxy>
# Obtain a dependency package: copy locally when the upload host is this
# very machine, otherwise fetch it over HTTP (through a proxy if one is
# configured).  Relies on the global ${arch}/${branch}.
copypkg()
{
	pb=$1
	host=$2
	from=$3
	to=$4
	http_proxy=$5
	if [ ${host} = $(hostname) ]; then
		cp ${pb}/${arch}/${branch}/packages/All/${from} ${to}
	elif [ -n "${http_proxy}" ]; then
		env HTTP_PROXY=${http_proxy} fetch -m -o ${to} http://${host}/errorlogs/${arch}-${branch}-packages-latest/All/${from}
	else
		fetch -m -o ${to} http://${host}/errorlogs/${arch}-${branch}-packages-latest/All/${from}
	fi
}
# bailout <chroot> <clean> <error> <pkgname>
# Abort the build: write a timestamped failure line to the journal and
# exit with the supplied status.
bailout()
{
	pkgname=$4
	error=$3
	clean=$2
	chroot=$1
	echo -n "$pkgname failed unexpectedly on $(hostname) at "
	date
	exit $error
}
# ---- positional arguments ----
arch=$1
branch=$2
buildid=$3
chroot=$4
shift 4

# Default niceness value
nice=0

# ---- configuration ----
. ${pb}/${arch}/client.conf
. ${pb}/${arch}/common.conf
# note: should NOT need anything from server.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/${arch}/portbuild.$(hostname)
. ${pb}/scripts/buildenv

buildroot=${scratchdir}
error=0

# ---- option flags (in the order pdispatch passes them) ----
clean=1
if [ "x$1" = "x-noclean" ]; then
    clean=0
    shift
fi
norestr=0
if [ "x$1" = "x-norestr" ]; then
    norestr=1
    # consumed by bsd.port.mk
    export NO_RESTRICTED=1
    shift
fi
noplistcheck=0
if [ "x$1" = "x-noplistcheck" ]; then
    noplistcheck=1
    # consumed by buildscript directly
    export NOPLISTCHECK=1
    shift
fi
nodistfiles=1
if [ "x$1" = "x-distfiles" ]; then
    # consumed by buildscript via make(1)
    export ALWAYS_KEEP_DISTFILES=1
    nodistfiles=0
    shift
fi
if [ "x$1" = "x-fetch-original" ]; then
    # consumed by buildscript via make(1)
    export FETCH_ORIGINAL=1
    shift
fi
if [ "x$1" = "x-trybroken" ]; then
    # consumed by bsd.port.mk
    export TRYBROKEN=1
    shift
fi

# Dependency package lists, one per dependency class.
ED=$1
PD=$2
FD=$3
BD=$4
RD=$5

builddir=${pb}/${arch}/${branch}/builds/${buildid}

buildenv.common

# Want to use the /etc/make.conf in the chroot
unset __MAKE_CONF

# set overrides for make.conf
export BACKUP_FTP_SITE=${CLIENT_BACKUP_FTP_SITE}

pkgname=$(basename $6 ${PKGSUFFIX})
dirname=$7
shift 2

echo $pkgname
echo $dirname

# set overrides for bsd.port.mk variables
export WRKDIRPREFIX=${CLIENT_WRKDIRPREFIX}
export DISTDIR=${CLIENT_DISTDIR}
export LOCALBASE=${LOCALBASE}
export PACKAGES=${CLIENT_PACKAGES_LOCATION}
export SRC_BASE=${CLIENT_SRCBASE}

# to catch missing dependencies
#export DEPENDS_TARGET=/usr/bin/true

# don't pass -j, -k etc. to sub-makes
unset MAKEFLAGS
unset PORTSDIR

# wait 2 hours before killing build with no output
export BUILD_TIMEOUT=${CLIENT_BUILD_TIMEOUT}

# prevent runaway processes
ulimit -f ${CLIENT_ULIMIT_F}
ulimit -t ${CLIENT_ULIMIT_T}

# directories to clean
cleandirs="${LOCALBASE} /compat /var/db/pkg"

export FTP_TIMEOUT=${CLIENT_FTP_TIMEOUT}
export HTTP_TIMEOUT=${CLIENT_HTTP_TIMEOUT}
export PATH=/sbin:/bin:/usr/sbin:/usr/bin:${LOCALBASE}/sbin:${LOCALBASE}/bin
export MALLOC_OPTIONS=${CLIENT_MALLOC_OPTIONS}

echo "building ${pkgname} in ${chroot}"

# ---- populate the chroot on first use ----
bindist=${buildroot}/${branch}/${buildid}/tarballs/bindist.tar
bindistlocal=${buildroot}/${branch}/${buildid}/tarballs/bindist-$(hostname).tar
if [ -f ${chroot}/.notready ]; then
    tar -C ${chroot} -xpf ${bindist}
    if [ -f ${bindistlocal} ]; then
	tar -C ${chroot} -xpf ${bindistlocal}
    fi
    # to be able to run certain kernel-dependent binaries
    # inside the chroot area
    cp -p /rescue/mount /rescue/umount ${chroot}/sbin
    cp -p /rescue/ps ${chroot}/bin
    rm ${chroot}/.notready
    touch ${chroot}/.ready
fi

if [ "${use_jail}" = "1" ]; then
    # Figure out jail IP addr
    chrootpid=$(basename ${chroot})
    ipbase=$((${chrootpid}+2))
    ip1=$(($ipbase /(256*256)))
    ip2=$((($ipbase - ($ip1*256*256)) /256))
    ip3=$((($ipbase - ($ip1*256*256) - ($ip2*256))))
fi

trap "bailout ${chroot} ${clean} ${error} ${pkgname}" 1 2 3 9 10 11 15

# ---- prepare a clean build workspace ----
rm -rf ${chroot}/tmp/*
cd ${chroot}/tmp
mkdir -p depends distfiles packages

echo "building ${pkgname} on $(hostname)" | tee ${chroot}/tmp/${pkgname}.log
echo "in directory ${chroot}" | tee -a ${chroot}/tmp/${pkgname}.log

# intentionally set up ${PORTSDIR} with symlink to catch broken ports
mkdir -p ${chroot}/a/ports
rm -rf ${chroot}/usr/ports

# Don't build in a world-writable standard directory because some ports
# hardcode this path and try to load things from it at runtime, which is
# bad for user security
rm -rf ${chroot}/${WRKDIRPREFIX}
mkdir -p ${chroot}/${WRKDIRPREFIX}

# pick up value from <arch>/portbuild.conf
if [ ! -z "${ccache_dir}" ]; then
    mkdir -p ${chroot}/root/.ccache/
    if [ "${ccache_dir_nfs}" = "1" ]; then
	mount_nfs -o rw -T -3 ${ccache_dir} ${chroot}/root/.ccache/
    else
	mount -o rw -t nullfs ${ccache_dir} ${chroot}/root/.ccache/
    fi
fi

# Expose the ports and src trees inside the chroot.
mount_fs ${builddir}/ports ${chroot}/a/ports ${CLIENT_NFS_MASTER}
ln -sf ../a/ports ${chroot}/usr/ports
mkdir -p ${chroot}/usr/src
mount_fs ${builddir}/src ${chroot}${CLIENT_SRCBASE} ${CLIENT_NFS_MASTER}

# set overrides for uname
buildenv.client ${chroot}${CLIENT_SRCBASE}

mount -t devfs foo ${chroot}/dev
umount -f ${chroot}/compat/linux/proc > /dev/null 2>&1

# just in case...
for dir in ${cleandirs}; do
    if ! rm -rf ${chroot}${dir} >/dev/null 2>&1; then
	chflags -R noschg ${chroot}${dir}
	rm -rf ${chroot}${dir} >/dev/null 2>&1
    fi
done
rm -rf ${chroot}/var/db/pkg/*

# Recreate the standard directory skeletons.
mtree -deU -f ${chroot}/usr/src/etc/mtree/BSD.root.dist -p ${chroot} \
    >/dev/null 2>&1
mtree -deU -f ${chroot}/usr/src/etc/mtree/BSD.var.dist -p ${chroot}/var \
    >/dev/null 2>&1
mtree -deU -f ${chroot}/usr/src/etc/mtree/BSD.usr.dist -p ${chroot}/usr \
    >/dev/null 2>&1
mkdir -p ${chroot}${LOCALBASE}
mtree -deU -f ${chroot}/a/ports/Templates/BSD.local.dist -p ${chroot}${LOCALBASE} \
    >/dev/null 2>&1

for i in ${ARCHS_REQUIRING_LINPROCFS}; do
    if [ ${i} = ${arch} ]; then
	# JDK ports need linprocfs :(
	mkdir -p ${chroot}/compat/linux/proc
	mount -t linprocfs linprocfs ${chroot}/compat/linux/proc
	break
    fi
done

# Prime the dynamic linker hints inside the chroot.
_ldconfig_dirs="/lib /usr/lib /usr/lib/compat"
ldconfig_dirs=""
for i in ${_ldconfig_dirs}; do
    if [ -d ${chroot}/${i} ]; then
	ldconfig_dirs="${ldconfig_dirs} ${i}"
    fi
done
chroot ${chroot} /sbin/ldconfig ${ldconfig_dirs}

for i in ${ARCHS_REQUIRING_AOUT_COMPAT}; do
    if [ ${i} = ${arch} ]; then
	chroot ${chroot} /sbin/ldconfig -aout /usr/lib/aout /usr/lib/compat/aout
	break
    fi
done

# ---- copy in all dependency packages ----
set x $ED $FD $PD $BD $RD
shift 1
while [ $# -gt 0 ]; do
    # XXX MCL more hard-coding
    if [ ! -f ${chroot}/tmp/depends/$1 ]; then
	echo "copying package $1 for ${pkgname}"
	copypkg ${pb} ${CLIENT_UPLOAD_HOST} $1 ${chroot}/tmp/depends "${http_proxy}"
	# Test for copy failure and bail
	# XXX MCL more hard-coding
	if [ ! -f ${chroot}/tmp/depends/$1 ]; then
	    echo "ERROR: Couldn't copy $1" | tee -a ${chroot}/tmp/${pkgname}.log
	    bailout ${chroot} ${clean} 255 ${pkgname}
	fi
    fi
    shift
done

cp -p ${pb}/scripts/buildscript ${chroot}
cp -p ${pb}/scripts/pnohang.${arch} ${chroot}/pnohang

# phase 1, make checksum
# Needs to be chroot not jail so that port can be fetched
chroot ${chroot} /buildscript ${dirname} 1 "$ED" "$PD" "$FD" "$BD" "$RD" 2>&1 | tee -a ${chroot}/tmp/${pkgname}.log

if [ -f ${chroot}/tmp/status ]; then
    error=$(cat ${chroot}/tmp/status)
else
    error=255
fi

if [ "${error}" = 0 ]; then
    # make checksum succeeded

    # phase 2, make package
    ln -sf ${pkgname}.log2 ${chroot}/tmp/make.log
    if [ "${use_jail}" = 1 ]; then
	# Run the build in a jail on a private loopback alias.
	ifconfig lo0 alias 127.${ip1}.${ip2}.${ip3}/32
	jail -J ${chroot}/tmp/jail.id ${chroot} jail-${chrootpid} 127.${ip1}.${ip2}.${ip3} /usr/bin/env JAIL_ADDR=127.${ip1}.${ip2}.${ip3} HTTP_PROXY=${http_proxy} /usr/bin/nice -n $nice /buildscript ${dirname} 2 "$ED" "$PD" "$FD" "$BD" "$RD" > ${chroot}/tmp/${pkgname}.log2 2>&1
	ifconfig lo0 delete 127.${ip1}.${ip2}.${ip3}
    else
	chroot ${chroot} /usr/bin/nice -n ${nice} /buildscript ${dirname} 2 "$ED" "$PD" "$FD" "$BD" "$RD" > ${chroot}/tmp/${pkgname}.log2 2>&1
    fi
    grep pnohang ${chroot}/tmp/${pkgname}.log2
    cat ${chroot}/tmp/${pkgname}.log2 >> ${chroot}/tmp/${pkgname}.log
    rm ${chroot}/tmp/${pkgname}.log2
    error=$(cat ${chroot}/tmp/status)
fi

rm -rf ${chroot}/${WRKDIRPREFIX}

# Record build completion time for ganglia
echo "${arch} ${branch} ${buildid}" > ${buildroot}/stamp/${pkgname}

exit $error

View File

@ -1,83 +0,0 @@
#!/bin/sh
#
# processfail <arch> <branch>
#
# Regenerate the per-arch/branch "new package building errors" HTML page
# from the pipe-separated ${pb}/${arch}/${branch}/failure file.  The page
# is built in a hidden temp file and atomically renamed into place.

arch=$1
branch=$2

pb=/var/portbuild

. ${pb}/conf/server.conf

ERRORLOGS_DIRECTORY="${WWW_DIRECTORY}/errorlogs"
of=${ERRORLOGS_DIRECTORY}/.${arch}-${branch}-failure.html

cd ${pb}/${arch}/${branch}

# Nothing to do unless the newfailure file changed since our last run.
# Fix: the original used "2>&1 /dev/null", which handed /dev/null to
# find(1) as a second path operand (an error, since paths must precede
# the expression) instead of discarding stderr.
if [ -e .newfailure.stamp -a $(echo $(find . -maxdepth 1 -newer .newfailure.stamp -name newfailure 2>/dev/null | wc -l)) = "0" ]; then exit; fi
touch .newfailure.stamp

newfailure=${pb}/${arch}/${branch}/newfailure

# Row count drives whether the page gets a table or the "No errors" text.
num=0
if [ -e ${newfailure} ]; then
    num=$(wc -l ${newfailure} | awk '{print $1}')
fi

# header <table-header-row>: start the HTML document; open the table only
# when there is at least one failure to report.  Reads globals of, num.
header() {
    echo "<html><head><title>New package building errors</title>" >$of
    echo "</head><body><h1>New package building errors</h1>" >>$of
    if [ "$num" -eq "0" ]; then
        echo "No errors (yet)" >>$of
    else
        echo "<table border=1>" >>$of
        echo "<tr>$1</tr>" >>$of
    fi
}

# footer: close the table and the document.
footer() {
    echo "</table>" >>$of
    echo "</body>" >>$of
    echo "</html>" >>$of
}

#
# Create "default" output, sorted on portname
#
header "<th>Port</th><th>Build log</th><th>First broken</th><th>Last tried</th><th># tries</th>"

# Local stand-ins for dirname(1)/basename(1) using parameter expansion
# (avoids forking an external utility per row).
dirname() {
    echo ${1%/*}
}
basename() {
    echo ${1##*/}
}

# Newest breakage first: field 4 is the first-broken epoch timestamp.
sort -r -n -k 4 -t \| failure > newfailure

IFS='|'
while read dir name ver date last count; do
    echo "<tr>" >> $of
    echo "<td><a href=\"http://cvsweb.freebsd.org/ports/$dir\">$dir</a></td>" >> $of
    if [ -L ${pb}/${arch}/${branch}/latest/${dir} ]; then
        # Point directly at the most recent error log for this port.
        err=$(readlink ${pb}/${arch}/${branch}/latest/${dir})
        echo "<td><a href=\"${arch}-errorlogs/$(basename $(dirname ${err}))/$(basename ${err})\">$ver</a></td>" >> $of
    else
        echo "<td><a href=\"${arch}-${branch}-latest/$ver.log\">$ver</a></td>" >> $of
    fi
    # Render the stored epoch timestamps in human-readable form.
    alphadate=$(date -jf %s ${date} "+%F %T")
    alphalast=$(date -jf %s ${last} "+%F %T")
    echo "<td>${alphadate}</td>" >> $of
    echo "<td>${alphalast}</td>" >> $of
    echo "<td>$count</td>" >> $of
    echo "</tr>" >> $of
done < newfailure

footer ""

# Atomically publish the finished page.
mv -f $of ${ERRORLOGS_DIRECTORY}/${arch}-${branch}-failure.html

View File

@ -1,370 +0,0 @@
#!/bin/sh
# Process the logs in a certain directory and construct the HTML pages
# summarizing them
#
# We only construct the new html pages if either
#   * No HTML files exist yet
#   * .force exists in this directory (set by pdispatch when a log is
#     removed by package build success)
#   * A new log file has appeared since the last run.
#
# Individual summary data for the logs is saved between runs instead of
# being recomputed each time.  Removing the old data for logs that
# no longer exist is the responsibility of the dopackages script when
# starting a new build.

arch=$1

# establish which directory the logfiles live in (leave out for
# backwards compatibility)
buildlogdir=$(realpath .)
if [ "$2" != "" ]; then
    buildlogdir="$2"
fi
baselogdir=$(basename ${buildlogdir})

# establish which directory INDEX lives in (leave out for
# backwards compatibility)
indexlogdir=$(realpath .)
if [ "$3" != "" ]; then
    indexlogdir="$3"
fi

# allow this script to be run from anywhere in the tree
scriptdir=$(dirname $0)
errorscript=$scriptdir/processonelog

# Figure out which arch we're building for
pb=/var/portbuild
. ${pb}/conf/server.conf
. ${pb}/${arch}/portbuild.conf

# get the list of buildlogs.
cd ${buildlogdir}

if [ -e .force -o \! -e index.html ]; then
    rm -f .force
    force=1
else
    force=0
fi

alllogs=$(find . -maxdepth 1 -type f \( -name '*.log' -o -name '*.log.bz2' \) | sed -e 's,^./,,' 2>/dev/null)
if [ -e .stamp -a ${#alllogs} -eq 0 ]; then
    exit
fi

# create a name for the tempfile
of=.index.html

# XXX temporary
test -f .logs && rm -f .logs
mkdir -p .logs

#
# Read the log-files and write summaries to .logs in the format
# $filename|$portname|$affected|$logsize|$dir|$maintainer|\
#   $reason|$tag|$broken|$datetime
#
echo "processlogs: at $(date), begin processing log files for ${arch} in ${baselogdir}"

rm -f .logs/.all
set -- ${alllogs}
gotnew=0
while [ $# -ge 1 ]; do
    log=$1
    shift

    if [ ${log%.log.bz2} != ${log} -a -e ${log%.bz2} ]; then
        # We have both a .log.bz2 and a .log, assume the former is stale
        rm -f ${log}
        continue
    fi

    # Basename with both .log and .log.bz2 stripped.
    # Fix: the second strip must operate on ${base}, not ${log};
    # otherwise "foo.log.bz2" kept its full name and the cached summary
    # was not shared between a log and its compressed form.
    base=${log%.bz2}
    base=${base%.log}

    if [ ${log} -nt .stamp -o ! -e .logs/${base} ]; then
        # Save to the base name so the data persists after the log is
        # compressed
        ${errorscript} ${log} ${indexlogdir} > .logs/${base}
        gotnew=1
    fi
    cat .logs/${base} >> .logs/.all
done
touch .stamp

n_logs=0
if [ -e .logs/.all ]; then
    n_logs=$(cat .logs/.all | wc -l)
fi

echo "processlogs: at $(date), end processing log files for ${arch} in ${baselogdir}"

if [ ${force} -eq 0 -a ${gotnew} -eq 0 ]; then
    # Nothing new to do
    exit
fi
# Emit the standard page header into ${of}: title, navigation links, and
# -- when there is at least one error -- the summary metadata, the opening
# <table>, and the table header row passed as $1.  Reads globals of, n_logs.
header() {
	{
		echo "<html><head><title>Package building errors</title>"
		echo "</head><body><h1>Package building errors</h1>"
		echo "<p>View by "
		echo "[ <a href=\"index.html\">port</a> "
		echo "| <a href=\"index-maintainer.html\">maintainer</a> "
		echo "| <a href=\"index-category.html\">category</a> "
		echo "| <a href=\"index-reason.html\">error</a> "
		echo "| <a href=\"index-builddate.html\">builddate</a> "
		echo "]</p>"
		if [ ${n_logs} = "0" ]; then
			echo "No errors (yet)"
		else
			if [ -s .updated ]; then
				echo "ports update finished at: $(cat .updated)<br>"
			fi
			latest=$(ls -rtTl *.log *.log.bz2 2> /dev/null | tail -1 | awk '{printf("%s %s %s %s\n",$6,$7,$8,$9)}')
			echo "Timestamp of newest log: $latest<br><br>"
			echo "\"Aff.\" is number of ports that depend on this one<br>"
			echo "\"<font color=\"red\">[B]</font>\" indicates port is marked BROKEN (Note: BROKEN ports are not frequently rebuilt so they may not be listed here)<br><br>"
			echo "<p>${n_logs} errors</p>"
			echo "<table border=1>"
			echo "<tr>$1</tr>"
		fi
	} > $of
}
# Close out the table and the HTML document started by header().
footer() {
	printf '%s\n' "</table>" "</body>" "</html>" >> $of
}
# Now reread the .logs/.all file and create the reports.
#
# Each record in .logs/.all is pipe-separated; after "set -- ${line}" with
# IFS="|" the positional parameters are:
#   $1 filename  $2 portname  $3 affected  $4 logsize  $5 dir
#   $6 maintainer  $7 reason  $8 tag  $9 broken  ${10} datetime
# The five sections below differ only in sort key and column order.
# NB: "sort +N" is the historical field-skip syntax (sort from field N+1).
# echo 'processlogs: at '`date`', create default output'
#
# Create "default" output, sorted on portname
#
header "<th>Port</th><th>Aff.</th><th>Size</th><th>CVS</th><th>Maintainer</th><th>Reason</th><th>Build date</th>"
if [ ${n_logs} -gt 0 ]; then
sort .logs/.all | while read line; do
IFS="|"
set -- ${line}
unset IFS
mailto="$6"
if [ "$6" != "" ] ; then
mailto="<a href=\"mailto:$6\">$6</a>"
fi
echo "<tr>" >> $of
echo "<td><a href=\"http://${MASTER_URL}/errorlogs/${arch}-errorlogs/${baselogdir}/$1\">$2</a></td>" >> $of
affby="$3"
test "${affby}" = "0" -o "${affby}" = "-1" && affby="&nbsp;"
echo "<td align=\"right\">${affby}</td><td align=\"right\">$4 Kb</td>" >> $of
echo "<td><a href=\"http://www.FreeBSD.org/cgi/cvsweb.cgi/ports/$5\">$5</a></td>" >> $of
echo "<td>$mailto</td>" >> $of
echo "<td>" >> $of
# NOTE(review): the other four sections compare $9 against "broken" (the
# value processonelog emits); this "yes" comparison never matches -- verify.
test "$9" = "yes" && echo "<font color=\"red\">[B]</font>" >> $of
reason="$7"
echo "<a href=\"http://${MASTER_URL}/#$8\">$reason</a>" >> $of
echo "</td>" >> $of
date=`echo ${10} | sed -e "s/_/ /g"`
echo "<td>$date</td>" >> $of
echo "</tr>" >> $of
done
fi
footer ""
mv -f $of index.html
# echo 'processlogs: at '`date`', create output sorted by category'
#
# Create output by category
#
header "<th>CVS</th><th>Aff.</th><th>Size</th><th>Port</th><th>Maintainer</th><th>Reason</th><th>Build date</th>"
if [ ${n_logs} -gt 0 ]; then
sort -t \| +4 .logs/.all | while read line; do
IFS="|"
set -- $line
unset IFS
mailto="$6"
if [ "$6" != "" ] ; then
mailto="<a href=\"mailto:$6\">$6</a>"
fi
echo "<tr>" >> $of
echo "<td><a href=\"http://www.FreeBSD.org/cgi/cvsweb.cgi/ports/$5\">$5</a></td>" >> $of
affby="$3"
test "${affby}" = "0" -o "${affby}" = "-1" && affby="&nbsp;"
echo "<td align=\"right\">${affby}</td><td align=\"right\">$4 Kb</td>" >> $of
echo "<td><a href=\"http://${MASTER_URL}/errorlogs/${arch}-errorlogs/${baselogdir}/$1\">$2</a></td>" >> $of
echo "<td>$mailto</td>" >> $of
echo "<td>" >> $of
test "$9" = "broken" && echo "<font color=\"red\">[B]</font>" >> $of
reason="$7"
echo "<a href=\"http://${MASTER_URL}/#$8\">$reason</a>" >> $of
echo "</td>" >> $of
date=`echo ${10} | sed -e "s/_/ /g"`
echo "<td>$date</td>" >> $of
echo "</tr>" >> $of
done
fi
footer ""
mv -f $of index-category.html
# echo 'processlogs: at '`date`', create output sorted by maintainer'
#
# Create output by maintainer
#
header "<th>Maintainer</th><th>Port</th><th>Aff.</th><th>Size</th><th>CVS</th><th>Reason</th><th>Build date</th>"
if [ ${n_logs} -gt 0 ]; then
sort -t \| +5 .logs/.all | while read line; do
IFS="|"
set -- $line
unset IFS
mailto="$6"
if [ "$6" != "" ] ; then
mailto="<a href=\"mailto:$6\">$6</a>"
fi
echo "<tr>" >> $of
echo "<td>$mailto</td>" >> $of
echo "<td><a href=\"http://${MASTER_URL}/errorlogs/${arch}-errorlogs/${baselogdir}/$1\">$2</a></td>" >> $of
affby="$3"
test "${affby}" = "0" -o "${affby}" = "-1" && affby="&nbsp;"
echo "<td align=\"right\">${affby}</td><td align=\"right\">$4 Kb</td>" >> $of
echo "<td><a href=\"http://www.FreeBSD.org/cgi/cvsweb.cgi/ports/$5\">$5</a></td>" >> $of
echo "<td>" >> $of
test "$9" = "broken" && echo "<font color=\"red\">[B]</font>" >> $of
reason="$7"
echo "<a href=\"http://${MASTER_URL}/#$8\">$reason</a>" >> $of
echo "</td>" >> $of
date=`echo ${10} | sed -e "s/_/ /g"`
echo "<td>$date</td>" >> $of
echo "</tr>" >> $of
done
fi
footer ""
mv -f $of index-maintainer.html
# echo 'processlogs: at '`date`', create output sorted by error'
#
# Create output by error
#
header "<th>Reason</th><th>Port</th><th>Aff.</th><th>Size</th><th>CVS</th><th>Maintainer</th><th>Build date</th>"
if [ ${n_logs} -gt 0 ]; then
sort -t \| +7 .logs/.all | while read line; do
IFS="|"
set -- ${line}
unset IFS
mailto="$6"
if [ "$6" != "" ] ; then
mailto="<a href=\"mailto:$6\">$6</a>"
fi
echo "<tr>" >> $of
echo "<td>" >> $of
test "$9" = "broken" && echo "<font color=\"red\">[B]</font>" >> $of
reason="$7"
echo "<a href=\"http://${MASTER_URL}/#$8\">$reason</a>" >> $of
echo "</td>" >> $of
echo "<td><a href=\"http://${MASTER_URL}/errorlogs/${arch}-errorlogs/${baselogdir}/$1\">$2</a></td>" >> $of
affby="$3"
test "${affby}" = "0" -o "${affby}" = "-1" && affby="&nbsp;"
echo "<td align=\"right\">${affby}</td><td align=\"right\">$4 Kb</td>" >> $of
echo "<td><a href=\"http://www.FreeBSD.org/cgi/cvsweb.cgi/ports/$5\">$5</a></td>" >> $of
echo "<td>$mailto</td>" >> $of
date=`echo ${10} | sed -e "s/_/ /g"`
echo "<td>$date</td>" >> $of
echo "</tr>" >> $of
done
fi
footer ""
mv -f $of index-reason.html
# echo 'processlogs: at '`date`', create output sorted by builddate'
#
# Create output by builddate
#
header "<th>Build date</th><th>Port</th><th>Aff.</th><th>Size</th><th>CVS</th><th>Maintainer</th><th>Reason</th>"
if [ ${n_logs} -gt 0 ]; then
sort -t \| +9 .logs/.all | while read line; do
IFS="|"
set -- ${line}
unset IFS
mailto="$6"
if [ "$6" != "" ] ; then
mailto="<a href=\"mailto:$6\">$6</a>"
fi
echo "<tr>" >> $of
date=`echo ${10} | sed -e "s/_/ /g"`
echo "<td>$date</td>" >> $of
echo "<td><a href=\"http://${MASTER_URL}/errorlogs/${arch}-errorlogs/${baselogdir}/$1\">$2</a></td>" >> $of
affby="$3"
test "${affby}" = "0" -o "${affby}" = "-1" && affby="&nbsp;"
echo "<td align=\"right\">${affby}</td><td align=\"right\">$4 Kb</td>" >> $of
echo "<td><a href=\"http://www.FreeBSD.org/cgi/cvsweb.cgi/ports/$5\">$5</a></td>" >> $of
echo "<td>$mailto</td>" >> $of
echo "<td>" >> $of
test "$9" = "broken" && echo "<font color=\"red\">[B]</font>" >> $of
reason="$7"
echo "<a href=\"http://${MASTER_URL}/#$8\">$reason</a>" >> $of
echo "</td>" >> $of
echo "</tr>" >> $of
done
fi
footer ""
mv -f $of index-builddate.html
# echo 'processlogs: at '`date`', create maintainer list'
#
# Get list of maintainers.
# (field 6 of the summary records; -f folds case, -u removes duplicates)
if [ ${n_logs} -gt 0 ]; then
cut -f 6 -d \| .logs/.all | sort -fu > maintainers
else
cat /dev/null > maintainers
fi
# echo 'processlogs: at '`date`', done'

View File

@ -1,67 +0,0 @@
#!/bin/sh
# Build extras.html: a report of ports whose installed files/directories
# do not match their mtree description, extracted from the per-port build
# logs in the current directory.  Also regenerates the "maintainers" list.
of=extras.html.new
# Bail out early unless forced or a log is newer than the last-run stamp.
if [ \! -e .force -a -e .stamp -a $(echo $(find . -newer .stamp -type f -name '*.log' 2>/dev/null | wc -l)) = "0" ]; then exit; fi
echo "<html><head><title>List of files and directories that do not match their mtree description</title>" >$of
echo "<h1>List of files and directories that do not match their mtree description</h1>" >>$of
echo "</head><body>" >>$of
rm -f .force
touch .stamp
# Collect the logs that contain an mtree "list of ... file" section.
find . -name '*.log' | xargs grep -l '^list of.*file' | sort | sed -e 's/^..//' > .tmp
if [ $(echo $(cat .tmp | wc -l)) = 0 ]; then
echo "No extra files (yet)" >> $of
else
set $(cat .tmp)
num=$#
if [ -s .updated ]; then
echo "(ports update finished at: $(cat .updated))<br>" >> $of
fi
echo "(timestamp of newest log: $(ls -rtTl | grep '\.log$' | tail -1 | awk '{printf("%s %s %s %s\n",$6,$7,$8,$9)}'))<br><br>" >> $of
echo "<table border=1>" >>$of
echo "<tr><th>Log</th><th>Aff.</th><th>Size</th><th>Repository</th><th>Maintainer</th><th>Pathname</th></tr>" >>$of
# One table row per offending log.  NOTE: ${maints} accumulates across
# iterations; this works because the while loop is not in a subshell.
while [ $# -gt 0 ]; do
log=$(basename $1 .log)
echo -n "<tr><td valign=\"top\">" >>$of
echo -n "<a name=\"$log.log\"></a>" >> $of
echo -n "<a href=\"$log.log\">" >>$of
echo -n $log >>$of
echo -n "</a>" >>$of
echo -n "</td><td align=right valign=\"top\">" >>$of
# Number of other ports depending on this one (minus its own line).
affected=$(($(grep -cF $log < INDEX) - 1))
if [ $affected != 0 ]; then echo -n $affected >>$of; fi
echo -n "</td><td align=right valign=\"top\">" >>$of
size=$(/bin/ls -sk $log.log | awk '{print $1}')
echo -n "$size KB" >>$of
echo -n "</td><td valign=\"top\">" >>$of
# assumes line 5 of the log is "port directory: /usr/ports/<cat>/<port>"
# -- TODO confirm against the log header format
dir=$(sed -n -e '5p' $log.log | awk '{print $3}' | sed -e 's,^/[^/]*/[^/]*/,,')
echo -n "<a href=\"http://www.FreeBSD.org/cgi/cvsweb.cgi/ports/$dir\">$dir</a>" >>$of
echo -n "</td><td valign=\"top\">" >>$of
# assumes line 4 is "maintained by: <addr>" -- TODO confirm
maint=$(sed -n -e '4p' $log.log | awk '{print $3}')
maints="$maints $maint"
echo -n "<a href=\"mailto:$maint\">$maint</a>" >>$of
echo "</td><td valign=\"top\">" >>$of
# Extract the three mtree diff sections: extras (magenta), files that
# disappeared (orange), and files whose metadata changed (red).
cat $log.log | sed -e '1,/^list of extra files and directories/d' -e '/^list of/,$d' | awk '{print $11}' | sed -E -e 's,^,<font color=\"magenta\">,' -e 's,(usr/(local|X11R6)),<font color=\"black\">\1,g' -e 's,$,</font></font><br>,' >>$of
cat $log.log | sed -e '1,/^list of files present before this port was installed/d' -e '/^list of/,$d' -e 's,^\./,,' | awk '{print $1}' | sed -e 's,^,<font color=\"orange\">,' -e 's,$, (missing)</font><br>,' >> $of
cat $log.log | sed -e '1,/^list of filesystem changes/d' -e '/^ /d' | awk '{print $1}' | sed -e 's,^,<font color=\"red\">,' -e 's,$, (changed)</font><br>,' >>$of
echo "</td></tr>" >>$of
shift
done
echo "</table><br>" >> $of
echo "$num errors<br>" >> $of
fi
rm .tmp
echo "<hr>" >> $of
echo "<a href=\"../\">back to top</a>" >> $of
echo "</body></html>" >>$of
mv -f $of extras.html
# Turn the space-separated maintainer list into one-per-line, folded and
# de-duplicated (the embedded backslash-newline in the sed script inserts
# a literal newline).
echo $maints | sed -e 's/ /\
/g' | sort -fu > maintainers

View File

@ -1,199 +0,0 @@
#!/bin/sh
# $FreeBSD$
#
# Read a single errorlogfile and output a line of the format
# $filename|$portname|$affected|$logsize|$dir|$maintainer|\
#  $reason|$tag|$broken|$datetime
#
# Originally factored out of: ports/Tools/portbuild/scripts/processlogs
filename=$1
indexdir=.
errordir=.
if [ "$2" != "" ]; then indexdir=$2; fi
if [ "$3" != "" ]; then errordir=$3; fi
indexdir=$(realpath $indexdir)
errordir=$(realpath $errordir)
indexfilename=$indexdir/INDEX
# Number of header lines at the top of every build log that carry the
# port metadata (directory, maintainer, build start time).
HEADER_LINES=9
portname=$(basename $filename .log.bz2)
# NOTE(review): "$(unknown)" is command substitution -- it tries to run a
# command named "unknown" (which does not exist) and yields the empty
# string, so this test can never succeed and ${cat} is always bzcat.
# The apparent intent was to detect an uncompressed .log and use plain
# cat for it -- TODO confirm against upstream history before changing.
if [ "${portname}" = "$(unknown)" ]; then
cat=cat
else
cat=bzcat
fi
portname=$(basename $portname .log)
# Ports affected by this failure = dependent entries in INDEX minus the
# port's own line.
affected=$(($(grep -cF $portname < $indexfilename) -1))
logsize=$(/bin/ls -sk $errordir/$filename | awk '{print $1}')
dir=$(${cat} $errordir/$filename | head -$HEADER_LINES | grep '^port directory:' | awk '{print $3}' | \
sed -e 's,^/[^/]*/[^/]*/,,')
maintainer=$(${cat} $errordir/$filename | head -$HEADER_LINES | grep '^maintained by' | awk '{print $3}')
datetime=$(${cat} $errordir/$filename | head -$HEADER_LINES | grep '^build started at' | \
sed -e 's/build started at ...//' | tr ' ' '_' )
# now try to classify the type of error found in the file.
# The ordering of this if/elif chain is significant: several tests must
# run before more generic ones (noted inline below), and the common cases
# come first for speed.
# the first case handles failures to even try to build any
# port (i.e. HTML file no longer there, pointyhat being unable
# to fetch any file, pointyhat being able to build any port, etc.)
if [ -z "$dir" -o -z "$datetime" ]; then
reason="cluster"; tag="cluster"
elif bzgrep -qE "(Error: mtree file ./etc/mtree/BSD.local.dist. is missing|error in pkg_delete|filesystem was touched prior to .make install|list of extra files and directories|list of files present before this port was installed|list of filesystem changes from before and after)" $1; then
reason="mtree"; tag="mtree"
# note: must run before the configure_error check
elif bzgrep -qE "Configuration .* not supported" $1; then
reason="arch"; tag="arch"
elif bzgrep -qE '(configure: error:|Script.*configure.*failed unexpectedly|script.*failed: here are the contents of)' $1; then
if bzgrep -qE "configure: error: cpu .* not supported" $1; then
reason="arch"; tag="arch"
elif bzgrep -qE "configure: error: [Pp]erl (5.* required|version too old)" $1; then
reason="perl"; tag="perl"
elif bzgrep -q 'sem_wait: Invalid argument' $1; then
reason="sem_wait"; tag="sem_wait"
else
reason="configure_error"; tag="configure"
fi
elif bzgrep -q "Couldn't fetch it - please try" $1; then
reason="fetch"; tag="fetch"
elif bzgrep -q "Error: shared library \".*\" does not exist" $1; then
reason="LIB_DEPENDS"; tag="libdepends"
elif bzgrep -qE "\.(c|cc|cxx|cpp|h|y)[1-9:]+ .+\.h: No such file" $1; then
reason="missing_header"; tag="header"
elif bzgrep -qE '(nested function.*declared but never defined|warning: nested extern declaration)' $1; then
reason="nested_declaration"; tag="nested_declaration"
# note: must be run before compiler_error
elif bzgrep -q '#warning "this file includes <sys/termios.h>' $1; then
reason="termios"; tag="termios"
# note: must be run before compiler_error
elif bzgrep -qE "(#error define UTMP_FILENAME in config.h|error: ._PATH_UTMP. undeclared|error: .struct utmpx. has no member named .ut_name|error: invalid application of .sizeof. to incomplete type .struct utmp|utmp.h> has been replaced by <utmpx.h)" $1; then
reason="utmp_x"; tag="utmp_x"
elif bzgrep -qE '(parse error|too (many|few) arguments to|argument.*doesn.*prototype|incompatible type for argument|conflicting types for|undeclared \(first use (in |)this function\)|incorrect number of parameters|has incomplete type and cannot be initialized|error: storage size.* isn.t known)' $1; then
reason="compiler_error"; tag="cc"
elif bzgrep -qE '(ANSI C.. forbids|is a contravariance violation|changed for new ANSI .for. scoping|[0-9]: passing .* changes signedness|lacks a cast|redeclared as different kind of symbol|invalid type .* for default argument to|wrong type argument to unary exclamation mark|duplicate explicit instantiation of|incompatible types in assignment|assuming . on overloaded member function|call of overloaded .* is ambiguous|declaration of C function .* conflicts with|initialization of non-const reference type|using typedef-name .* after|[0-9]: size of array .* is too large|fixed or forbidden register .* for class|assignment of read-only variable|error: label at end of compound statement|error:.*(has no|is not a) member|error:.*is (private|protected)|error: uninitialized member|error: unrecognized command line option)' $1; then
reason="new_compiler_error"; tag="newgcc"
# XXX MCL must preceed badc++
elif bzgrep -qE "error: invalid conversion from .*dirent" $1; then
reason="dirent"; tag="dirent"
elif bzgrep -qE '(syntax error before|friend declaration|no matching function for call to|.main. must return .int.|invalid conversion from|cannot be used as a macro name as it is an operator in C\+\+|is not a member of type|after previous specification in|no class template named|because worst conversion for the former|better than worst conversion|no match for.*operator|no match for call to|undeclared in namespace|is used as a type, but is not|error: array bound forbidden|error: class definition|error: expected constructor|error: there are no arguments|error:.*cast.*loses precision|ISO C\+\+ does not support|error: invalid pure specifier)' $1; then
reason="bad_C++_code"; tag="badc++"
elif bzgrep -qE 'error: (array type has incomplete element type|conflicts with new declaration|expected.*before .class|expected primary expression|extra qualification .* on member|.*has incomplete type|invalid cast from type .* to type|invalid lvalue in (assignment|decrement|increment|unary)|invalid storage class for function|lvalue required as (increment operator|left operand)|.*should have been declared inside|static declaration of.*follows non-static declaration|two or more data types in declaration specifiers|.* was not declared in this scope)' $1; then
reason="gcc4_error"; tag="gcc4"
elif bzgrep -qE '(/usr/libexec/elf/ld: cannot find|undefined reference to|cannot open -l.*: No such file|error: linker command failed with exit code 1)' $1; then
reason="linker_error"; tag="ld"
elif bzgrep -q 'install: .*: No such file' $1; then
reason="install_error"; tag="install"
elif bzgrep -qE "(conflicts with installed package|is already installed - perhaps an older version|You may wish to ..make deinstall.. and install this port again)" $1; then
reason="depend_object"; tag="dependobj"
elif bzgrep -q "core dumped" $1; then
reason="coredump"; tag="coredump"
# linimon would _really_ like to understand how to fix this problem
elif bzgrep -q "pkg_add: tar extract.*failed!" $1; then
reason="truncated_distfile"; tag="truncated_distfile"
elif bzgrep -qE "(error: C++ requires a type specifier|error: allocation of incomplete type|error: array is too large|error: binding of reference|error: called object type|error: cannot combine with previous.*specifier|error: cannot initialize (a parameter|a variable|return object)|error: cannot pass object|error:.*cast from pointer|error: comparison of unsigned.*expression.*is always false|error: conversion.*(is ambiguous|specifies type)|error:.*converts between pointers to integer|error: declaration of.*shadows template parameter|error:.*declared as an array with a negative size|error: default arguments cannot be added|error: default initialization of an object|error:.*directive requires a positive integer argument|error: elaborated type refers to a typedef|error: exception specification|error: expected.*(at end of declaration|expression|identifier)|error: explicit specialization.*after instantiation|error: explicitly assigning a variable|error: expression result unused|error: fields must have a constant size|error: flexible array member|error: (first|second) parameter of .main|error: format string is not a string literal|error: global register values are not supported|error: if statement has empty body|error: illegal storage class on function|error: implicit (declaration|instantiation)|error: indirection.*will be deleted|error: initializer element is not.*constant|error: initialization of pointer|error: invalid (argument type|integral value|operand|token|use of a cast|value)|error: indirect goto might cross|error:.*is a (private|protected) member|error: member (of anonymous union|reference)|error: non-const lvalue|error: non-void function.*should return a value|error:.*not supported|error: no (matching constructor|member named|viable overloaded)|error: passing.(a.*value|incompatible type)|error: qualified reference|error: redeclaration of.*built-in type|error:.*requires a (constant expression|pointer or reference|type specifier)|error: redefinition of|error: 
switch condition has boolean|error: taking the address of a temporary object|error:.*unable to pass LLVM bit-code files to linker|error: unexpected token|error: unknown (machine mode|type name)|error: unsupported (inline asm|option)|error: unused (function|parameter)|error: use of (GNU old-style field designator|undeclared identifier|unknown builtin)|error: using the result of an assignment|error: variable length array|error: void function.*should not return a value|the clang compiler does not support|Unknown depmode none)" $1; then
# NOTE(review): the regex above appears to contain an embedded newline
# after "error: " (before "switch condition") -- possibly a paste/wrap
# artifact; verify against the original file before relying on it.
reason="clang"; tag="clang"
# below here are the less common items
# XXX MCL "file not recognized: File format not recognized" can be clang
elif bzgrep -qE "(.s: Assembler messages:|Cannot (determine .* target|find the byte order) for this architecture|^cc1: bad value.*for -mcpu.*switch|could not read symbols: File in wrong format|[Ee]rror: [Uu]nknown opcode|error.*Unsupported architecture|ENDIAN must be defined 0 or 1|failed to merge target-specific data|(file not recognized|failed to set dynamic section sizes): File format not recognized|impossible register constraint|inconsistent operand constraints in an .asm|Invalid configuration.*unknown.*machine.*unknown not recognized|invalid lvalue in asm statement|is only for.*, and you are running|not a valid 64 bit base/index expression|relocation R_X86_64_32.*can not be used when making a shared object|relocation truncated to fit: |shminit failed: Function not implemented|The target cpu, .*, is not currently supported.|This architecture seems to be neither big endian nor little endian|unknown register name|Unable to correct byte order|Unsupported platform, sorry|won't run on this architecture)" $1; then
reason="arch"; tag="arch"
elif bzgrep -qE "autoconf([0-9\-\.]*): (not found|No such file or directory)" $1; then
reason="autoconf"; tag="autoconf"
elif bzgrep -q "autoheader: not found" $1; then
reason="autoheader"; tag="autoheader"
elif bzgrep -qE "automake(.*): not found" $1; then
reason="automake"; tag="automake"
elif bzgrep -q 'Checksum mismatch' $1; then
reason="checksum"; tag="checksum"
elif bzgrep -qE "(clang: error:|clang++: error:|error: cannot compile this.*yet|error: clang frontend command failed|error:.*ignoring directive for now|error: (invalid|unknown use of) instruction mnemonic|error:.*please report this as a bug|error: unknown argument)" $1; then
reason="clang-bug"; tag="clang-bug"
elif bzgrep -q "Shared object \"libc.so.6\" not found, required by" $1; then
reason="compat6x"; tag="compat6x"
elif bzgrep -q "Fatal error .failed to get sysctl kern.sched.cpusetsize" $1; then
reason="cpusetsize"; tag="cpusetsize"
elif bzgrep -q "error in dependency .*, exiting" $1; then
reason="depend_package"; tag="dependpkg"
elif bzgrep -qE "pkg_(add|create):.*(can't find enough temporary space|projected size of .* exceeds available free space)" $1; then
reason="disk_full"; tag="df"
elif bzgrep -qE "((Can't|unable to) open display|Cannot open /dev/tty for read|RuntimeError: cannot open display|You must run this program under the X-Window System)" $1; then
reason="DISPLAY"; tag="display"
elif bzgrep -qE '(No checksum recorded for|(Maybe|Either) .* is out of date, or)' $1; then
reason="distinfo_update"; tag="distinfo"
elif bzgrep -qE "Member name contains .\.\." $1; then
reason="fetch"; tag="fetch"
elif bzgrep -qE "(pnohang: killing make checksum|fetch: transfer timed out)" $1; then
reason="fetch_timeout"; tag="fetch-timeout"
elif bzgrep -q "See <URL:http://gcc.gnu.org/bugs.html> for instructions." $1; then
reason="gcc_bug"; tag="gcc-bug"
elif bzgrep -qE "(missing separator|mixed implicit and normal rules|recipe commences before first target).*Stop" $1; then
reason="gmake"; tag="gmake"
elif bzgrep -qE "(Run-time system build failed for some reason|tar: Error opening archive: Failed to open.*No such file or directory)" $1; then
reason="install_error"; tag="install"
elif bzgrep -qE "(cc: .*libintl.*: No such file or directory|cc: ndbm\.so: No such file or directory|error: The X11 shared library could not be loaded|libtool: link: cannot find the library|relocation against dynamic symbol|Shared object.*not found, required by)" $1; then
reason="linker_error"; tag="ld"
elif bzgrep -q "libtool: finish: invalid argument" $1; then
reason="libtool"; tag="libtool"
elif bzgrep -q "Could not create Makefile" $1; then
reason="makefile"; tag="makefile"
elif bzgrep -v "regression-test.continuing" $1 | grep -qE "make.*(cannot open [Mm]akefile|don.t know how to make|fatal errors encountered|No rule to make target|built-in)"; then
reason="makefile"; tag="makefile"
elif bzgrep -q "/usr/.*/man/.*: No such file or directory" $1; then
reason="manpage"; tag="manpage"
elif bzgrep -q "out of .* hunks .*--saving rejects to" $1; then
reason="patch"; tag="patch"
elif bzgrep -qE "((perl|perl5.6.1):.*(not found|No such file or directory)|cp:.*site_perl: No such file or directory|perl(.*): Perl is not installed, try .pkg_add -r perl|Perl .* required--this is only version)" $1; then
reason="perl"; tag="perl"
elif bzgrep -qE "(Abort trap|Bus error|Error 127|Killed: 9|Signal 1[01])" $1; then
reason="process_failed"; tag="process"
elif bzgrep -qE "(USER PID PPID PGID.*JOBC STAT TT TIME COMMAND|pnohang: killing make package)" $1; then
reason="runaway_process"; tag="runaway"
elif bzgrep -qE "(/usr/bin/ld: cannot find -l(pthread|XThrStub)|cannot find -lc_r|Error: pthreads are required to build this package|Please install/update your POSIX threads (pthreads) library|requires.*thread support|: The -pthread option is deprecated)" $1; then
reason="threads"; tag="threads"
elif bzgrep -qi 'read-only file system' $1; then
reason="WRKDIR"; tag="wrkdir"
# Although these can be fairly common, and thus in one sense ought to be
# earlier in the evaluation, in practice they are most often secondary
# types of errors, and thus need to be evaluated after all the specific
# cases.
elif bzgrep -qE 'cc1.*warnings being treated as errors' $1; then
reason="compiler_error"; tag="cc"
elif bzgrep -q 'tar: Error exit delayed from previous errors' $1; then
reason="install_error"; tag="install"
elif bzgrep -q "Cannot stat: " $1; then
reason="configure_error"; tag="configure"
elif bzgrep -q "/usr/bin/ld: cannot find -l" $1; then
reason="linker_error"; tag="ld"
elif bzgrep -q "cd: can't cd to" $1; then
reason="NFS"; tag="nfs"
elif bzgrep -q "pkg_create: make_dist: tar command failed with code" $1; then
reason="PLIST"; tag="plist"
elif bzgrep -q "Segmentation fault" $1; then
reason="segfault"; tag="segfault"
else
reason="???"; tag="unknown"
fi
# clean up some error cases -- the way .logs works, it expects that
# every field in it MUST be nonblank, so we insert a metatoken here.
# See below.
if [ -z "$dir" ]; then
dir="NONE"
fi
if [ -z "$maintainer" ]; then
maintainer="NONE"
fi
if [ -z "$datetime" ]; then
datetime="NONE"
fi
broken="no"
if bzgrep -q "Trying build of .* even though it is marked BROKEN" $1; then
broken="broken"
fi
# Emit the single pipe-separated summary record consumed by processlogs.
echo "$filename|$portname|$affected|$logsize|$dir|$maintainer|$reason|$tag|$broken|$datetime|$errordir"

View File

@ -1,50 +0,0 @@
#!/bin/sh
# Test packages and make sure they are not corrupt
#
# Decompression-tests every package under packages/All; corrupt packages
# are quarantined into ${pb}/${arch}/${branch}/bad and their per-category
# symlinks removed.
#
# XXX stale, and better to do this individually at the time the
# package is copied in

# configurable variables
pb=/var/portbuild

if [ $# -ne 2 ]; then
    echo "prunebad <arch> <branch>"
    exit 1
fi

arch=$1
branch=$2
shift 2

. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv

buildenv ${pb} ${arch} ${branch}

cd ${pb}/${arch}/${branch}

# Poor man's lock so two prunebad runs don't race each other.
if [ -f .packagelock ]; then exit; fi
touch .packagelock

cd packages/All

# Only examine packages newer than the previous run.  Fix: the stamp
# lives two levels up (we touch ../../.packagestamp below), but the
# original tested plain ".packagestamp" from inside packages/All, so
# the incremental branch was never taken and every run rescanned all
# packages.
if [ ! -f ../../.packagestamp ]; then
    newfiles=$(find . -name \*${PKGSUFFIX})
else
    newfiles=$(find . -name \*${PKGSUFFIX} -newer ../../.packagestamp)
fi
touch ../../.packagestamp

echo Checking $newfiles

mkdir -p ${pb}/${arch}/${branch}/bad

echo "checking packages"
for i in ${newfiles}; do
    # A package that fails the decompression test is corrupt: quarantine
    # it and delete the category symlinks pointing at it.
    if ! ${PKGZIPCMD} -t $i; then
        echo "Warning: package $i is bad, moving to ${pb}/${arch}/${branch}/bad"
        # the latest link will be left behind...
        mv $i ${pb}/${arch}/${branch}/bad
        rm ../*/$i
    fi
done

cd ../..
rm .packagelock

View File

@ -1,88 +0,0 @@
#!/bin/sh
#
# Prune the failure files of stale entries
#
# This must be called via:
#
# lockf -k ${pb}/${arch}/${branch}/failure.lock ${pb}/scripts/prunefailure ${arch} ${branch} ${buildid}
#
# to avoid racing with any package builds in progress that might try to append to
# these files.
#
# An entry is dropped (and its "latest error log" symlink removed) when
# the port is gone from the INDEX, its package was successfully built,
# or it is now listed in the duds file.

# configurable variables
pb=/var/portbuild

# Abort, leaving the old failure file untouched, if the rewrite fails.
cleanup() {
echo "Problem writing new failure file!"
rm -f failure.new
exit 1
}

if [ $# -ne 3 ]; then
echo "prunefailure <arch> <branch> <buildid>"
exit 1
fi

arch=$1
branch=$2
buildid=$3
shift 3

. ${pb}/conf/server.conf
. ${pb}/conf/common.conf
. ${pb}/${arch}/portbuild.conf
. ${pb}/scripts/buildenv

builddir=${pb}/${arch}/${branch}/builds/${buildid}
buildenv ${pb} ${arch} ${branch} ${builddir}

home=${pb}/${arch}/${branch}
cd $home

pkgdir=${builddir}/packages/All
index=${PORTSDIR}/${INDEXFILE}

# Sanity check: refuse to prune against a truncated/corrupt INDEX,
# which would wrongly discard every failure entry.
if [ "`wc -l $index | awk '{print $1}'`" -lt 9000 ]; then
echo "INDEX is corrupted, terminating!"
exit 1
fi

echo "===> Pruning old failure file"
rm -f failure.new

# The failure file is |-separated: dir|name|ver|olddate|date|count.
IFS='|'
while read dir name ver olddate date count; do
if [ -z "$dir" -o -z "$name" -o -z "$ver" -o -z "$olddate" -o -z "$date" -o -z "$count" ]; then
echo Malformed entry "$dir|$name|$ver|$olddate|$date|$count"
# Clean up the 'latest error log' symlink
rm -f ${pb}/${arch}/${branch}/latest/${dir}
continue
fi
entry=$(grep "|/usr/ports/$dir|" $index)
if [ -z "$entry" ]; then
echo $dir not in index
rm -f ${pb}/${arch}/${branch}/latest/${dir}
continue
fi
# NOTE(review): IFS is still '|' here, so the unquoted $entry splits
# on '|' and awk's $1 is the pkgname-version field — confirm this is
# the intended parsing rather than whitespace splitting.
newver=$(echo $entry | awk '{print $1}')
if [ -e "${builddir}/packages/All/$newver${PKGSUFFIX}" ]; then
echo "$newver package exists, should not still be here!"
rm -f ${pb}/${arch}/${branch}/latest/${dir}
continue
fi
if grep -qxF $newver ${builddir}/duds.full; then
echo "$newver listed in duds, should not be here"
rm -f ${pb}/${arch}/${branch}/latest/${dir}
continue
fi
# Keep the entry, carrying the refreshed version forward.
(echo "$dir|$name|$newver|$olddate|$date|$count" >> $home/failure.new) || cleanup
done < $home/failure
mv failure.new failure

View File

@ -1,66 +0,0 @@
#!/bin/sh
#
# prunepkgs: bring a package directory into sync with an INDEX file by
# removing stray non-package files, packages not listed in the INDEX,
# and dead symlinks.
#
# usage: prunepkgs <indexfile> <pkgdir> [-dummy]
#
# With -dummy, only report what would be removed.

if [ $# -lt 2 ]; then
	echo "usage: prunepkgs <indexfile> <pkgdir> [-dummy]"
	# BUGFIX: was "return 1", which is invalid outside a function.
	exit 1
fi
index=$1
pkgdir=$2
if [ $# -eq 3 -a "$3" = "-dummy" ]; then
	dummy=1;
else
	dummy=0;
fi

# Remove $1 if it is a symlink whose target no longer exists.
testprunelink() {
	if [ ! -e $1 ]; then
		dest=$(readlink $1)
		echo "$1 -> $dest pruned."
		if [ "${dummy}" = "0" ]; then
			rm -f $1
		fi
	fi
}

# Set up work dir
tmpdir=$(mktemp -d -t prunepkgs)
trap "rm -rf $tmpdir; exit 1" 1 2 3 5 10 13 15

# Check for non-package files
extras=$(find ${pkgdir} -type f \! \( -name INDEX -o -name CHECKSUM.MD5 -o -name \*.tgz -o -name \*.tbz \) )
echo "==> Removing extra files"
echo $extras
if [ "x${extras}" != "x" ]; then
	if [ "${dummy}" = "0" ]; then
		rm -f ${extras}
	fi
fi

# Check for files not present in INDEX
echo "==> Removing extra package files"
# BUGFIX: parenthesize the -name alternation.  Without the parens, find's
# implicit-AND binds tighter than -o, so "-type f" applied only to the
# *.tgz arm and *.tbz matches were not restricted to plain files.
find $pkgdir/All -type f \( -name \*.tgz -o -name \*.tbz \) | sed -e "s,${pkgdir}/All/,," -e 's,\.tbz$,,' -e 's,\.tgz$,,' |sort > ${tmpdir}/files
cut -f 1 -d '|' ${index} |sort > ${tmpdir}/packages
extras=$(comm -2 -3 ${tmpdir}/files ${tmpdir}/packages)
echo $extras
if [ "${dummy}" = "0" ]; then
	for i in $extras; do
		rm -f $pkgdir/All/${i}.tgz $pkgdir/All/${i}.tbz
	done
fi
rm -rf ${tmpdir}

# Look for dead links and prune them
echo "==> Removing dead symlinks"
links=$(find $pkgdir -type l)
for i in $links; do
	testprunelink $i
done

View File

@ -1,75 +0,0 @@
/* ptimeout: executes command but kills it after a specified timeout
 * usage: ptimeout timeout command args ...
 *
 * Works by forking twice: the first child execs the command, the second
 * child sleeps for the timeout.  The parent waits for whichever exits
 * first and kills the other.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/types.h>
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>
#include <time.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>

int
main(int argc, char *argv[])
{
	int timeout, status, i;
	pid_t pid1, pid2, child;
	char *command, args[MAXPATHLEN + 1];
	time_t t;

	if (argc < 3) {
		printf("usage: %s timeout command [args ...]\n", argv[0]);
		exit(1);
	}

	timeout = atoi(argv[1]);
	command = argv[2];

	/* Flatten the command and its arguments into one string, used
	 * only for the timeout diagnostic message below. */
	bzero(args, MAXPATHLEN + 1);
	for (i = 2; i < argc; i++) {
		/* BUGFIX: strlcat's third argument is the total buffer
		 * size (it accounts for the current length itself), not
		 * the remaining space. */
		strlcat(args, argv[i], sizeof(args));
		strlcat(args, " ", sizeof(args));
	}

	if ((pid1 = fork()) > 0) {
		if ((pid2 = fork()) > 0) {
			/* Parent: wait for the command (pid1) or the
			 * timer (pid2), whichever finishes first. */
			child = wait(&status);
			/* BUGFIX: was "pid1 = child" (assignment), which
			 * always took the first branch and so never
			 * killed the command from the parent. */
			if (pid1 == child) {
				/* Command finished: cancel the timer. */
				kill(pid2, SIGKILL);
			} else {
				/* Timer fired first: kill the command. */
				kill(pid1, SIGTERM);
			}
			/* exit status in upper 8 bits, killed signal (if any)
			 * in lower 8 bits
			 */
			exit((status >> 8) | (status & 0xff));
		} else {
			/* Second child: the timer. */
			sleep(timeout);
			t = time(NULL);
			printf("ptimeout: killing %s (pid %d) since timeout of %d expired at %s", args, pid1, timeout, ctime(&t));
			kill(pid1, SIGTERM);
			exit(1);
		}
	} else {
		/* First child: run the command. */
		execvp(command, argv + 2);
	}

	/* Shouldn't be reached. */
	return 0;
}

View File

@ -1,31 +0,0 @@
#!/bin/sh
#
# Consume one free-job slot for a build machine.
#
# usage: <script> <arch> <machine>
#
# The slot count lives in ${buildroot}/${arch}/queue/<machine>; when the
# file does not exist yet it is seeded from the machine's maxjobs value.

buildroot=/var/portbuild

i=$1 #arch
m=$2 #machine

# If the machine is no longer listed in mlist, drop its queue file.
# NOTE(review): this is a plain substring match — a machine name that is
# a prefix of another would also count as online; confirm mlist format.
machonline=$(grep -c ${m} ${buildroot}/${i}/mlist)
if [ ${machonline} = "0" ]; then
rm ${buildroot}/${i}/queue/${m}
exit 0
fi

# portbuild.conf supplies the default maxjobs; the per-machine file can
# override it.
. ${buildroot}/${i}/portbuild.conf
if [ -f ${buildroot}/${i}/portbuild.${m} ]; then
. ${buildroot}/${i}/portbuild.${m}
fi

q=${buildroot}/${i}/queue/$m
if [ -f ${q} ]; then
num=$(cat $q)
else
num=${maxjobs}
fi

# Decrement the count, never going below zero.
if [ "$num" -gt 0 ]; then
echo $((${num}-1)) > $q
fi

# Keep the queue file owned by the per-arch build user.
chown ports-${i} ${q}
exit 0

View File

@ -1,48 +0,0 @@
#!/bin/sh
# $FreeBSD$
# client script to report load to the server.
#
# Emits a simple key=value report on stdout: arch, osversion, number of
# in-use build chroots, ready build environments, load average and any
# error flags found in the scratch directory.

pb=/var/portbuild

# note: uname is not being overridden
arch=$(uname -m)
osver=$(sysctl -n kern.osreldate)

# client.conf/portbuild.conf provide scratchdir; the per-host file can
# override settings.
. ${pb}/${arch}/client.conf
. ${pb}/${arch}/portbuild.conf
if [ -f ${pb}/${arch}/portbuild.$(hostname) ]; then
. ${pb}/${arch}/portbuild.$(hostname)
fi

# Look for exceptional conditions
# (flag files dropped in the scratch dir, e.g. .squid or .disk).
error=
for i in squid disk; do
if [ -f ${scratchdir}/.${i} ]; then
error="${i} ${error}"
fi
done

# XXX MCL DEBUG 20100727
# on occasion, a machine winds up with a huge number (> 300) of chroots.
# When this happens, the following command takes more than a minute, and
# pointyhat will pound it to death, resulting in 'kern.ipc.maxpipekva
# exceeded; see tuning(7)'. So, as a workaround until the reason for the
# stale chroots is understood, limit the time we allow this to run.
ulimit -t 10

# Count busy chroots: each active chroot contains a "used" marker file.
num=$(echo $(ls -1d ${scratchdir}/*/*/*/used ${scratchdir}/*/*/chroot/*/used 2>/dev/null| wc -l))

echo "arch=${arch}"
echo "osversion=${osver}"
echo "jobs=${num}"

# Report which branch/buildid environments are ready for use.
cd /var/portbuild/${arch}
for i in */builds/*; do
buildenv=${arch}/${i%%/*}/${i##*/}
if [ -f ${i}/.ready ]; then
buildenvs="${buildenv} ${buildenvs}"
fi
done
echo "buildenvs=${buildenvs}"

echo -n "load="
uptime
echo "error=${error}"

View File

@ -1,15 +0,0 @@
#!/bin/sh
#
# rc hook for build clients: launch the load-reporting service when the
# system boots.  Only the "start" action is handled; anything else is a
# silent no-op.

case "$1" in
start)
	;;
*)
	exit
	;;
esac

pb=/var/portbuild
arch=$(cat /etc/arch)
s=$pb/scripts/reportload

# Only start the service if the script is present and executable.
if [ -x $s ]; then
	$s ${arch} &
	echo -n ' reportload'
fi

View File

@ -1,22 +0,0 @@
code meaning sent by
---- ---------------------------------------- -----------
1 error: invalid build id, etc. pdispatch
201 success: qmanager command executed qmanager
202 success: machine selected qmanager
203 success: job blocked qmanagerobj
253 success: setting up buildid on machine pdispatch
254 error: failed to claim chroot on host/
chroot is empty/
build failed uncleanly pdispatch
255 error: copy failed portbuild
401 error: could not connect qmanagerhandler
402 error: machine modified but unknown qmanager
202 error: job would block qmanagerobj
404 error: no such job to release/reconnect qmanager
406 error: machine modified but input error qmanager
407 error: bogus connection/release request qmanager
408 error: machine modified but not by root qmanager
409 error: machine modified but current jobs qmanager
410 error: job to reconnect is still connected qmanager
411 error: machine modified but already exists qmanager
412 error: job to release is still connected qmanager

View File

@ -1,169 +0,0 @@
#!/bin/sh
#
# Script run on the clients, to set them up in preparation for building
# packages. This includes setting up parts of the /var/portbuild
# directory hierarchy, the portbuild script and the bindist.tar file
# for populating the build chroots.
#
# Runs in two phases: "pre-copy" creates the directory skeleton that the
# master then populates with an rsync push; "post-copy" unpacks the
# pushed tarballs (bindist/ports/src) and prepares the chroot area.
# The md5 arguments are compared against locally recorded checksums so
# unchanged tarballs are not re-extracted; -force defeats that check and
# -nocopy skips the copy/link steps entirely.

# BUGFIX: the script consumes nine positional parameters (bindistmd5 was
# missing from the usage string, and the old "-lt 8" check allowed an
# invocation with an empty phase).
if [ $# -lt 9 ]; then
	echo "usage: $0 portbuilddir arch branch buildid tmpdir portsmd5 srcmd5 bindistmd5 phase [-nocopy] [-force]"
	exit 1
fi

pb=$1
arch=$2
branch=$3
buildid=$4
tmpdir=$5
portsmd5=$6
srcmd5=$7
bindistmd5=$8
phase=$9
shift 9

# Phase 1: create the directories the master will rsync into.
precopy() {
	# Create directories and symlinks for later population

	# Timestamp of finished builds
	mkdir -p ${tmpdir}/stamp/

	# Prepare all directories, they will be populated by a rsync
	# push from the master
	mkdir -p ${pb}/scripts ${pb}/${arch}/clients/

	if [ "${buildid}" != "-" -a "${branch}" != "-" ]; then
		if [ ${nocopy} -eq 0 ]; then
			mkdir -p ${builddir}
		fi
	fi
}

# Phase 2: unpack what the master pushed and prepare the build area.
postcopy() {
	if [ "${buildid}" != "-" -a "${branch}" != "-" ]; then
		# Extract ports trees and cleanup
		if [ ${nocopy} -eq 0 ]; then
			cd ${builddir} || return 1

			# Unpack bindist only if its checksum changed (or -force).
			if [ -f bindist.tbz.md5 -a "${force}" -ne 1 ]; then
				localbindistmd5=$(awk '{print $4}' bindist.tbz.md5)
			else
				localbindistmd5=0
			fi
			if [ ${localbindistmd5} != ${bindistmd5} -o ! -f bindist.tar ]; then
				if [ -f bindist.tar ]; then
					rm -f bindist.tar
				fi
				bzcat bindist.tbz > bindist.tar || return 1
			fi

			# Unpack the ports tree if its checksum changed.
			if [ -f ports-${buildid}.tbz.md5 -a "${force}" -ne 1 ]; then
				localportsmd5=$(awk '{print $4}' ports-${buildid}.tbz.md5)
			else
				localportsmd5=0
			fi
			if [ ${localportsmd5} != ${portsmd5} -o ! -d ports ]; then
				if [ -d ports ]; then
					# Rename aside and remove in the background so
					# extraction does not wait on the rm.
					mv ports ports~
					mkdir ports
					rm -rf ports~ &
				fi
				tar xfj ports-${buildid}.tbz || return 1
			fi

			# Unpack the src tree if its checksum changed.
			if [ -f src-${buildid}.tbz.md5 -a "${force}" -ne 1 ]; then
				localsrcmd5=$(awk '{print $4}' src-${buildid}.tbz.md5)
			else
				localsrcmd5=0
			fi
			if [ ${localsrcmd5} != ${srcmd5} -o ! -d src ]; then
				if [ -d src ]; then
					mv src src~
					mkdir src
					rm -rf src~ &
				fi
				tar xfj src-${buildid}.tbz || return 1
			fi
			touch .ready
		fi

		# Clean up the tmpdir
		# By now the portbuild.conf files are in place so we can source them
		. ${pb}/${arch}/portbuild.conf
		me=$(hostname)
		if [ -f ${pb}/${arch}/portbuild.${me} ] ; then
			. ${pb}/${arch}/portbuild.${me}
		fi
		if [ "${buildid}" != "-" -a "${branch}" != "-" ]; then
			mkdir -p ${wrkdir}/chroot
			if [ "${use_zfs}" -eq 1 ]; then
				# ZFS: populate a world filesystem and snapshot it so
				# build chroots can be cloned cheaply from the snapshot.
				zbase=${scratchdir#/}
				zfs create ${zbase}/${branch} || true
				zfs create ${zbase}/${branch}/${buildid} || true
				zfs create ${zbase}/${branch}/${buildid}/world || true
				zfs create ${zbase}/${branch}/${buildid}/chroot || true
				tar xfpC ${builddir}/bindist.tar ${scratchdir}/${branch}/${buildid}/world
				tar xfpC ${pb}/${arch}/clients/bindist-$(hostname).tar ${scratchdir}/${branch}/${buildid}/world
				zfs snapshot ${zbase}/${branch}/${buildid}/world@base
			else
				# Non-ZFS: chroots are populated later from these tarballs.
				mkdir -p ${wrkdir}/tarballs
				if [ ${nocopy} -eq 0 ]; then
					ln -sf ${pbab}/builds/${buildid}/bindist.tar ${wrkdir}/tarballs
					ln -sf ${pb}/${arch}/clients/bindist-$(hostname).tar ${wrkdir}/tarballs
				fi
			fi
		fi

		# Clean out any leftover chroots from a previous build.
		for i in ${wrkdir}/chroot/*; do
			${sudo_cmd} ${pb}/scripts/clean-chroot ${arch} ${branch} ${buildid} ${i} 2
		done
		wait
	else
		# Client may have been waiting for us to set it up, so finish
		# the job.
		touch /tmp/.boot_finished
	fi
}

if [ "${branch}" != "-" -a "${buildid}" != "-" ]; then
	pbab=${pb}/${arch}/${branch}
	builddir=${pbab}/builds/${buildid}
	wrkdir=${tmpdir}/${branch}/${buildid}
fi

# Optional trailing flags.
nocopy=0
force=0
while [ $# -ge 1 ]; do
	case $1 in
	-nocopy)
		nocopy=1
		;;
	-force)
		force=1
		;;
	esac
	shift
done

case ${phase} in
pre-copy)
	precopy
	;;
post-copy)
	postcopy
	;;
*)
	echo "Invalid phase ${phase}"
	exit 1
esac

View File

@ -1,11 +0,0 @@
#!/bin/sh
#
# $FreeBSD$
#
# show currently running builds in terse format
#
# Scans the process table for pdispatch invocations (filtering out our
# own grep) and reduces each command line to "<branch>-<category/port>".

pattern="/var/portbuild/scripts/pdispatch"

ps axww \
	| grep "${pattern}" \
	| grep -v "grep ${pattern}" \
	| sed -e "s@.*pdispatch @@;s@/var/portbuild/scripts/portbuild .*/usr/ports/@@;s@^ @@g;s@ @-@" \
	| sort

View File

@ -1,20 +0,0 @@
#!/bin/sh
#
# Report, for each supported architecture, how many packages exist in
# the latest build of the given branch.

pb=/var/portbuild
. ${pb}/conf/server.conf

[ $# -eq 1 ] || { echo "usage: <branch>"; exit 1; }
branch=$1

for i in ${SUPPORTED_ARCHS}; do
	all=${pb}/$i/${branch}/builds/latest/packages/All
	# Skip architectures that have no build for this branch.
	[ -d ${all} ] || continue
	count=$(find ${all} -name \*.tbz -o -name \*.tgz |wc -l)
	echo -n "$i: ${count} "
done
echo

View File

@ -1,190 +0,0 @@
#!/usr/bin/env python
# Initially by pav, refactored by kris
#
# Display the status of currently-running package builds: for each
# pdispatch process, show arch, branch, port, the build node it is
# talking to (via its sockets) and what stage it appears to be in
# (derived from its subprocess command lines).
#
# XXX Todo: handle ipv6 sockets (used by e.g. sparc64)

import commands
import socket
import sys

# Column widths, grown while collecting data so the output lines up.
archwidth = branchwidth = portwidth = hostwidth = 0

def getpdispatch():
    """ get list of pdispatch processes """
    # Returns {pid: {'arch':..., 'branch':..., 'port':...}} for every
    # pdispatch whose arguments match the script's own argv filter.
    global archwidth, branchwidth, portwidth;
    args = sys.argv
    data = {}
    cmd = 'pgrep -lf "pdispatch '+' '.join(args[1:])+'"'
    for line in commands.getoutput(cmd).splitlines():
        # Skip the pgrep process itself.
        if line.count('pgrep -lf "pdispatch'):
            continue;
        fields = line.split()
        pid = fields[0]
        # Field layout of the pdispatch command line:
        # <pid> ... pdispatch <arch> <branch> ... /usr/ports/<port>
        arch = fields[3]
        branch = fields[4]
        port = fields[9].replace('/usr/ports/','')
        line = ' '.join(fields)
        if len(arch) > archwidth:
            archwidth = len(arch)
        if len(branch) > branchwidth:
            branchwidth = len(branch)
        if len(port) > portwidth:
            portwidth = len(port)
        data[pid] = {'arch': arch, 'branch': branch, 'port': port}
        # presumably a debugging aid for unexpected pgrep lines —
        # NOTE(review): confirm whether this is still wanted.
        if branch == 'pgrep':
            print line
    return data

def getparent(ppid_map, pid):
    """walk up the ppid tree and return the parent pdispatch"""
    next = pid
    while ppid_map.has_key(next):
        next = ppid_map[next]['ppid']
    return next

def getallsubprocs(pids):
    """recursively find all subprocs of the list in pids"""
    ppids = []
    nppids = pids
    while len(nppids):
        pidlist=",".join(nppids)
        nppids = commands.getoutput('pgrep -P ' + pidlist).splitlines()
        ppids.extend(nppids)
    return ppids

def dosubprocs(data):
    """ recursively get list of subprocesses (ptimeout.host, sleep 15, ...) and fill in data """
    # Maps each subprocess pid to its ppid, a human-readable stage name
    # derived from its command line, and its elapsed time.
    ppid_map = { }
    idlers = { }
    ppid_arg = ",".join(getallsubprocs(data.keys()))
    list = commands.getoutput('ps ax -o pid,ppid,etime,command -p ' + ppid_arg).splitlines()[1:]
    for line in list:
        fields = line.split()
        pid = fields.pop(0)
        ppid = fields.pop(0)
        time = fields.pop(0)
        command = ' '.join(fields)
        # Translate known subprocess command lines into stage names.
        if command.count('ptimeout.host'):
            command = "building"
        elif command == 'sleep 15':
            command = "waiting for idle node"
            idlers[ppid] = ppid
        elif command.count('scripts/clean-chroot'):
            command = "cleaning up the node"
        elif command.count('scripts/claim-chroot'):
            command = "preparing node"
        elif command.count('tar --unlink'):
            command = "copying package"
        elif command.count('buildsuccess'):
            command = "registering success"
        elif command.count('buildfailure'):
            command = "registering failure"
        elif command.count('scp ') and command.count('.log '):
            command = "copying logs"
        elif command.count('ssh'):
            command = "building"
        ppid_map[pid] = {'ppid':ppid, 'command':command, 'time':time}

    # fill in sleeper's parent pid etimes so we display how long the pdispatch
    # has been trying to acquire a chroot, instead of the <15 second sleep
    # lifetime
    if len(idlers):
        ppid_arg = ','.join(idlers)
        lines = commands.getoutput('ps ax -o pid,etime -p ' + ppid_arg).splitlines()[1:]
        for line in lines:
            fields = line.split()
            ppid = fields[0]
            time = fields[1]
            data[ppid]['time'] = time

    # Propagate commands and runtime to parent pdispatch. We have
    # to do this after the loop above because ps sorts it output
    # and we are not guaranteed to have processes the ppid before
    # the pid. The alternative is multiple ps invocations which
    # is slower.
    for pid in ppid_map.iterkeys():
        pppid = getparent(ppid_map, pid) # Find ancestor pdispatch
        ppid_map[pid].update({'pppid':pppid})
        blob = data[pppid]
        # propagate command and time to parent if necessary
        if not blob.has_key('command'):
            blob['command'] = ppid_map[pid]['command']
        if not blob.has_key('time'):
            blob['time'] = ppid_map[pid]['time']
    return ppid_map

# get list of network sockets and match to pdispatch children
def getsockets(ppid_map, data):
    global hostwidth
    # XXX what about ipv6?
    s = commands.getoutput('sockstat -4 -c')
    list = s.splitlines()
    list.pop(0)
    for line in list:
        # Drop the fixed-width USER/COMMAND columns so the pid is first.
        line = line[20:]
        fields = line.split()
        spid = fields.pop(0)
        if len(fields) < 3:
            continue
        # NOTE(review): pop(3) requires len(fields) >= 4, but the guard
        # above only rejects < 3 — a 3-field line would raise IndexError;
        # confirm against actual sockstat output.
        tuple = fields.pop(3)
        if tuple.find(':') == -1:
            continue
        (host, port) = tuple.split(':')
        # Check if the socket belongs to one of our pids
        if ppid_map.has_key(spid):
            ppid = ppid_map[spid]['pppid'] # Map to pdispatch
            if data.has_key(ppid):
                # Prefer the reverse-DNS name; fall back to the address.
                try:
                    (hostname, bork, bork) = socket.gethostbyaddr(host)
                except socket.herror:
                    hostname = host
                blob = data[ppid]
                blob['host'] = hostname
                if len(hostname) > hostwidth:
                    hostwidth = len(hostname)

if __name__ == "__main__":
    data = getpdispatch()
    if len(data) == 0:
        print "No matching jobs"
        sys.exit()
    pids = data.keys()
    ppid_map = dosubprocs(data)
    getsockets(ppid_map, data)

    # format for output
    output = [ ]
    for pid in data:
        blob = data[pid]
        # Fill in defaults for jobs we could not fully classify.
        if not blob.has_key('host'):
            blob['host'] = ''
        if not blob.has_key('command'):
            blob['command'] = 'Dispatching'
        if not blob.has_key('time'):
            blob['time'] = '00:00'
        output.append(blob['arch'].ljust(archwidth + 2) + \
                      blob['branch'].ljust(branchwidth + 2) + \
                      blob['port'].ljust(portwidth + 2) + \
                      blob['host'].ljust(hostwidth + 2) + \
                      blob['command'] + " (" + blob['time'] + ")")

    # sort, output
    output.sort()
    print '\n'.join(output)

View File

@ -1,56 +0,0 @@
#!/bin/sh
#
# Update the master source trees that are used by package builds
# and other consumers.
#
# For each branch in SRC_BRANCHES: csup the tree to "now", record the
# update time, then take a ZFS snapshot named after that time.

pb=/var/portbuild
. ${pb}/conf/server.conf

base=${ZFS_MOUNTPOINT}/${SNAP_SRC_DIRECTORY_PREFIX}
zbase=${ZFS_VOLUME}/${SNAP_SRC_DIRECTORY_PREFIX}

# Convert a full date(1) "%+" string into csup's dotted date format.
supstamp() {
	fulldate=$1
	date -j -f %+ "${fulldate}" +%Y.%m.%d.%H.%M.%S
}

# Convert a full date(1) "%+" string into a compact snapshot name.
stamp() {
	fulldate=$1
	date -j -f %+ "${fulldate}" +%Y%m%d%H%M%S
}

# Report the run duration and exit with the supplied status.
# BUGFIX: previously hard-coded "exit 1", so even a successful run
# (finish 0) reported failure to the caller.
finish() {
	err=$1
	end=$(date +%s)
	echo "Finished at $(date)"
	len=$((end-begin))
	echo "Duration = $(date -j -f %s +%H:%M:%S ${len})"
	exit ${err}
}

begin=$(date +%s)
echo "Started at $(date)"

# We need to preserve group writability so portmgr group can write
umask 002

# for branch in 8; do
for branch in $SRC_BRANCHES; do
	cd ${base}${branch}
	fulldate=$(date)
	supdate=$(supstamp ${fulldate})
	eval tag=\$SRC_BRANCH_${branch}_TAG
	# Instantiate the supfile template with this run's date/prefix/tag.
	cat ${SRC_MASTER_SUPFILE} | \
	    sed "s|%%DATE%%|${supdate}|;s|%%PREFIX%%|${base}${branch}|;s|%%TAG%%|${tag}|" \
	    > ${SRC_SUPFILE}.${branch}.now
	# example destination directory: /a/snap/src-6/src/ (tricky!)
	csup -L 2 ${SRC_SUPFILE}.${branch}.now > src-csup.${branch}.log
	echo ${fulldate} > src/.updated
	# hack for zfs breakiness
	find . -group wheel|xargs chgrp portmgr
	snapdate=$(stamp ${fulldate})
	zfs snapshot ${zbase}${branch}/src@${snapdate}
done

finish 0

View File

@ -1,52 +0,0 @@
#!/bin/sh
#
# Update the master ports tree that is used by package builds
# and other consumers.
#
# csup the tree to "now", record the update time, then take a ZFS
# snapshot named after that time.

pb=/var/portbuild
. ${pb}/conf/server.conf

base=${ZFS_MOUNTPOINT}/${SNAP_PORTS_DIRECTORY}
zbase=${ZFS_VOLUME}/${SNAP_PORTS_DIRECTORY}

# Convert a full date(1) "%+" string into csup's dotted date format.
supstamp() {
	fulldate=$1
	date -j -f %+ "${fulldate}" +%Y.%m.%d.%H.%M.%S
}

# Convert a full date(1) "%+" string into a compact snapshot name.
stamp() {
	fulldate=$1
	date -j -f %+ "${fulldate}" +%Y%m%d%H%M%S
}

# Report the run duration and exit with the supplied status.
# BUGFIX: previously hard-coded "exit 1", so even a successful run
# (finish 0) reported failure to the caller.
finish() {
	err=$1
	end=$(date +%s)
	echo "Finished at $(date)"
	len=$((end-begin))
	echo "Duration = $(date -j -f %s +%H:%M:%S ${len})"
	exit ${err}
}

begin=$(date +%s)
echo "Started at $(date)"

# We need to preserve group writability so portmgr group can write
umask 002

cd ${base}
fulldate=$(date)
supdate=$(supstamp ${fulldate})
# Instantiate the supfile template with this run's date and prefix.
cat ${PORTS_MASTER_SUPFILE} | \
    sed "s|%%DATE%%|${supdate}|;s|%%PREFIX%%|${base}|" \
    > ${PORTS_SUPFILE}.now
# example destination directory: /a/snap/ports-head/ports/
csup -L 2 ${PORTS_SUPFILE}.now > ports-csup.log
echo ${fulldate} > ports/.updated
# hack for zfs breakiness
find . -group wheel|xargs chgrp portmgr
snapdate=$(stamp ${fulldate})
zfs snapshot ${zbase}/ports@${snapdate}

finish 0

View File

@ -1,233 +0,0 @@
#!/usr/bin/env python
# Back up a list of ZFS filesystems, doing a full backup periodically
# and using incremental diffs in between
#
# Backup files are named "<old>-<new>" (snapshot timestamps); a full
# dump is "0-<date>".  The set of files in the backup directory forms a
# chain from "0" to the latest snapshot; if the chain is broken or too
# old, a new full dump is forced.

import zfs, commands, datetime, sys, os, bz2

# List of filesystems to backup
# XXX MCL
backuplist=["a", "a/nfs", "a/local", "a/portbuild",
            "a/portbuild/amd64", "a/portbuild/i386",
            "a/portbuild/ia64", "a/portbuild/powerprc",
            "a/portbuild/sparc64"]

# Directory to store backups
# XXX MCL
backupdir="/dumpster/pointyhat/backup"

# How many days between full backups
# XXX MCL
fullinterval=9

from signal import *

def validate():
    # Sanity-check that every listed filesystem exists.
    # NOTE(review): defined but never called below — confirm intent.
    fslist = zfs.getallfs()
    missing = set(backuplist).difference(set(fslist))
    if len(missing) > 0:
        print "Backup list refers to filesystems that do not exist: %s" % missing
        sys.exit(1)

def mkdirp(path):
    # mkdir -p equivalent: create each component of an absolute path.
    plist = path.split("/")
    for i in xrange(2,len(plist)+1):
        sofar = "/".join(plist[0:i])
        if not os.path.isdir(sofar):
            os.mkdir(sofar)

class node(object):
    # One snapshot date in the backup chain; child/parent links model
    # the incremental-dump graph (which should be a single chain).
    child=None
    parent=None
    name=None
    visited=0

    def __init__(self, name):
        self.name = name
        self.child = []
        self.parent = None
        self.visited = 0

print "zbackup: starting at " + datetime.datetime.now().ctime()

for fs in backuplist:
    print
    dir = backupdir + "/" + fs
    mkdirp(dir)
    snaplist = None
    try:
        # Only all-digit snapshot names (our timestamps) are relevant.
        snaplist = [snap[0] for snap in zfs.getallsnaps(fs) if snap[0].isdigit()]
    except zfs.NoSuchFS:
        print "no such fs %s, skipping" % fs
        continue

    dofull = 0
    # Mapping from backup date tag to node
    backups={}
    # list of old-new pairs seen
    seen=[]
    # Most recent snapshot date
    latest = "0"

    # Parse the existing backup files into a graph of date nodes.
    for j in os.listdir(dir):
        (old, sep, new) = j.partition('-')
        if not old.isdigit() or not new.isdigit():
            continue
        seen.append("%s-%s" % (old, new))
        if int(old) >= int(new):
            print "Warning: backup sequence not monotonic: %s >= %s" % (old, new)
            continue
        try:
            oldnode = backups[old]
        except KeyError:
            oldnode = node(old)
            backups[old] = oldnode
        try:
            newnode = backups[new]
        except KeyError:
            newnode = node(new)
            backups[new] = newnode
        if int(new) > int(latest):
            latest = new
        oldnode.child.append(newnode)
        if newnode.parent:
            # We are not a tree!
            if not dofull:
                print "Multiple backup sequences found, forcing full dump!"
                dofull = 1
            continue
        newnode.parent = oldnode

    if not "0" in backups and not dofull:
        # No root!
        print "No full backup found!"
        dofull = 1
    if not latest in snaplist and not dofull:
        print "Latest dumped snapshot no longer exists: forcing full dump"
        dofull = 1

    now = datetime.datetime.now()
    nowdate = now.strftime("%Y%m%d%H%M")
    try:
        prev = datetime.datetime.strptime(latest, "%Y%m%d%H%M")
    except ValueError:
        if not dofull:
            print "Unable to parse latest snapshot as a date, forcing full dump!"
            dofull = 1

    print "Creating zfs snapshot %s@%s" % (fs, nowdate)
    zfs.createsnap(fs, nowdate)

    # Find path from latest back to root
    try:
        cur = backups[latest]
    except KeyError:
        cur = None
    chain = []
    firstname = "0"
    # Skip if latest doesn't exist or chain is corrupt
    while cur:
        chain.append("%s-%s" % (cur.parent.name, cur.name))
        par = cur.parent
        # Remove from the backup tree so we can delete the leftovers
        # below
        par.child.remove(cur)
        cur.parent=None
        if par.name == "0":
            firstname = cur.name
            break
        cur = par
    chain.reverse()
    print "chain is " + str( chain )

    # Prune stale links not in the backup chain
    for j in backups.iterkeys():
        cur = backups[j]
        for k in cur.child:
            stale="%s-%s" % (cur.name, k.name)
            print "Deleting stale backup %s" % stale
            os.remove("%s/%s/%s" % (backupdir, fs, stale))

    # Lookup date of full dump
    try:
        first = datetime.datetime.strptime(firstname, "%Y%m%d%H%M")
    except ValueError:
        if not dofull:
            print "Unable to parse first snapshot as a date, forcing full dump!"
            dofull = 1
    if not dofull and (now - first) > datetime.timedelta(days=fullinterval):
        print "Previous full backup too old, forcing full dump!"
        dofull = 1

    # In case we are interrupted don't leave behind a truncated file
    # that will corrupt the backup chain
    if dofull:
        latest = "0"
    outfile="%s/%s/.%s-%s" % (backupdir, fs, latest, nowdate)

    # zfs send aborts on receiving a signal
    signal(SIGTSTP, SIG_IGN)
    if not dofull:
        print "Doing incremental backup of %s: %s-%s" % (fs, latest, nowdate)
        (err, out) = \
            commands.getstatusoutput("zfs send -i %s %s@%s | bzip2 > %s" %
                                     (latest, fs, nowdate, outfile))
    else:
        print "Doing full backup of %s" % fs
        latest = "0"
        (err, out) = \
            commands.getstatusoutput("zfs send %s@%s | bzip2 > %s" %
                                     (fs, nowdate, outfile))
    signal(SIGTSTP, SIG_DFL)

    if err:
        print "Error from snapshot: (%s, %s)" % (err, out)
        try:
            # Remove the partial dump; ENOENT (errno 2) is tolerated.
            os.remove(outfile)
            print "Deleted file %s" % outfile
        except OSError, err:
            print "OSError: " + repr(err)
            if err.errno != 2:
                raise
        finally:
            sys.exit(1)

    # We seem to be finished
    # Rename the dot-file into place only once the dump completed.
    try:
        os.rename(outfile, "%s/%s/%s-%s" % (backupdir, fs, latest, nowdate))
    except:
        print "Error renaming dump file" + outfile + "!"
        raise

    # A successful full dump obsoletes all previous backup files.
    if dofull:
        for i in seen:
            print "Removing stale snapshot %s/%s" % (dir, i)
            os.remove("%s/%s" % (dir, i))

print
print "zbackup: ending at " + datetime.datetime.now().ctime()

View File

@ -1,124 +0,0 @@
#!/usr/bin/env python
# ZFS snapshot client
import socket, os, sys
ZSERVER = ('gohan10.freebsd.org', 8888)
ZFSLOCAL = '/tmp/.zserver'
def connect():
""" Connects to service, returns (socket, islocal) """
if os.path.exists(ZFSLOCAL):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.connect(ZFSLOCAL)
return (s, True)
except:
s.close()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(ZSERVER)
return (s, False)
except:
s.close()
return (None, None)
def send(sock, cmd):
""" Send a command, retrieve single line of reply """
sock.write(cmd)
sock.flush()
res= sock.readline()
#print "res = %s" % res
return res
def error(msg):
print >>sys.stderr, "%s: %s" % (sys.argv[0], msg.rstrip())
sys.exit(1)
def do_list(sockfile, islocal, args):
res = send(sockfile, "LIST\n")
if res[0] == "2":
for i in sockfile:
print i.rstrip()
else:
error(res[4:])
def do_get(sockfile, islocal, args):
res = send(sockfile, "GET %s %s\n" % (args[0], args[1]))
if res[0] == "2":
while True:
block = sockfile.read(32*1024)
if not block:
break
sys.stdout.write(block)
else:
error(res[4:])
def do_diff(sockfile, islocal, args):
res = send(sockfile, "DIFF %s %s %s\n" % (args[0], args[1], args[2]))
if res[0] == "2":
while True:
block = sockfile.read(32*1024)
if not block:
break
sys.stdout.write(block)
else:
error(res[4:])
def do_reg(sockfile, islocal, args):
if not sock[1]:
error("must register on local machine")
res = send(sockfile, "REGISTER %s\n" % args[0])
if res[0] == "2":
print res[4:]
else:
error(res[4:])
def do_unreg(sockfile, islocal, args):
if not sock[1]:
error("must register on local machine")
res = send(sockfile, "UNREGISTER %s\n" % args[0])
if res[0] == "2":
print res[4:]
else:
error(res[4:])
def do_help(sockfile, islocal, args):
for (i, val) in sorted(cmddict.iteritems()):
print "%15s - %s" % (i, val[1])
cmddict = {'list':(do_list, 'List available filesystem/snapshot pairs'),
'get':(do_get, 'Get a snapshot'),
'diff':(do_diff, 'Get the diffs between two snapshots'),
'register':(do_reg, 'Register a new filesystem (privileged)'),
'reg':(do_reg, 'Alias for register'),
'unregister':(do_unreg, 'Register a new filesystem (privileged)'),
'unreg':(do_unreg, 'Alias for register'),
'help':(do_help, 'Display this help')}
if __name__ == "__main__":
try:
sock = connect()
except:
raise
sys.exit(1)
args = sys.argv
try:
cmd = args[1]
arg = args[2:]
# print "cmd = %s, arg = %s" % (cmd, arg)
cmddict[cmd][0](sock[0].makefile(), sock[1], arg)
except (KeyError, IndexError):
raise
error("No such command\n")

View File

@ -1,58 +0,0 @@
#!/usr/bin/env python
#
# Expire old snapshots
#
# Walks a fixed list of (filesystem, max-age-in-days) pairs and destroys
# every ZFS snapshot whose timestamp-formatted name is older than the
# per-filesystem limit.

import zfs, commands, datetime, os

# List of filesystems to expire
expirelist=(("a", 14),
            ("a/nfs", 14),
            ("a/local", 14),
            ("a/portbuild", 14),
            ("a/portbuild/amd64", 14),
            ("a/portbuild/i386", 14),
            ("a/portbuild/ia64", 14),
            ("a/portbuild/powerpc", 14),
            ("a/portbuild/sparc64", 14),
            ("a/snap", 7),
            ("a/snap/ports", 2),
            ("a/snap/src-7", 2),
            ("a/snap/src-8", 2),
            ("a/snap/src-9", 2),
            ("a/snap/world-amd64-HEAD", 7),
            ("a/snap/world-i386-HEAD", 7))

now = datetime.datetime.now()
print "zexpire: starting at " + now.ctime()

for (fs, maxage) in expirelist:
    print
    try:
        snapdata = zfs.getallsnaps(fs)
    except zfs.NoSuchFS:
        print "no such fs %s, skipping" % fs
        continue
    # getallsnaps() returns tuples whose first element is the snap name.
    snaps = (i[0] for i in snapdata)
    for snap in snaps:
        # Snapshot names are timestamps in one of two layouts
        # (YYYYmmddHHMM or YYYYmmddHHMMSS); anything else is skipped.
        try:
            snapdate = datetime.datetime.strptime(snap, "%Y%m%d%H%M")
        except ValueError:
            try:
                snapdate = datetime.datetime.strptime(snap, "%Y%m%d%H%M%S")
            except ValueError:
                print "zexpire: don't know what to do with snap `" + snap + "'"
                continue
        if (now - snapdate) > datetime.timedelta(days=maxage):
            print "Snapshot %s@%s too old, attempting zfs destroy" % (fs, snap)
            (err, out) = commands.getstatusoutput("zfs destroy %s@%s" % (fs,snap))
            if err:
                # Report but keep going; other snapshots may still expire.
                print "Error deleting snapshot", out

then = datetime.datetime.now()
print
print "zexpire: ending at " + then.ctime()
print "zexpire: ending at " + then.ctime()