Commit 9a14adff authored by Mark Hymers

Merge commit 'ftpmaster/master'

@@ -43,27 +43,6 @@ function make_buildd_dir () {
find ./tree -mindepth 1 -maxdepth 1 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
}
# move accepted NEW packages from stagedir into queue/accepted
function acceptnew () {
return
# cd $newstage
# for file in $(find . -maxdepth 1 -mindepth 1 -type f -name \*.changes | sed -e "s,./,," | xargs); do
# sed '1,/Files:/d' "${file}" | sed '/BEGIN PGP SIGNATURE/,$d' \
# | while read notused1 notused2 notused3 notused4 NAME; do
# if [ -z "${NAME}" ]; then
# # Sometimes there is a newline after the Files:, ignore it
# continue
# fi
# if [ -f "${NAME}" ]; then
# mv --target-directory="${accepted}" "${NAME}"
# else
# log_error "Error, couldn't find file ${NAME} to move to ${accepted}"
# fi
# done
# mv --target-directory="${accepted}" "${file}" "${file%%.changes}.dak"
# done
}
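The commented-out body above preserves the parsing trick acceptnew relied on: delete everything up to the Files: field and everything from the PGP signature onwards, then read the fifth column of each remaining line. A standalone bash sketch of that extraction, not part of this commit (the .changes name is a hypothetical example):

    changes="example_1.0-1_amd64.changes"      # hypothetical input
    # Files: lines carry "md5sum size section priority filename"
    sed '1,/Files:/d' "${changes}" | sed '/BEGIN PGP SIGNATURE/,$d' \
        | while read md5 size section priority name; do
            # a blank line may follow "Files:", skip it
            [ -n "${name}" ] && echo "${name}"
        done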
# Do the unchecked processing, in case we have files.
function do_unchecked () {
cd $unchecked
......
@@ -44,417 +44,8 @@ export SCRIPTVARS=/srv/ftp.debian.org/dak/config/debian/vars
# common functions are "outsourced"
. "${configdir}/common"
# Timestamp. Used for dinstall stat graphs
function ts() {
echo "Archive maintenance timestamp ($1): $(date +%H:%M:%S)"
}
# Cleanup actions
function cleanup() {
rm -f ${LOCK_DAILY}
rm -f ${LOCK_ACCEPTED}
}
# If we error out this one is called, *FOLLOWED* by cleanup above
function onerror() {
ERRDATE=$(date "+%Y.%m.%d-%H:%M:%S")
subject="ATTENTION ATTENTION!"
if [ "${error}" = "false" ]; then
subject="${subject} (continued)"
else
subject="${subject} (interrupted)"
fi
subject="${subject} dinstall error at ${ERRDATE} in ${STAGEFILE} - (Be quiet, Brain, or I'll stab you with a Q-tip)"
cat "${STAGEFILE}.log" | mail -s "${subject}" -a "X-Debian: DAK" cron@ftp-master.debian.org
}
########################################################################
# the actual dinstall functions follow #
########################################################################
# Set up the notice file to tell mis-timed mirrors they synced at the wrong time
function notice() {
rm -f "$NOTICE"
cat > "$NOTICE" <<EOF
Packages are currently being installed and indices rebuilt.
Maintenance is automatic, starting at 01|07|13|19:52 UTC,
and ending about an hour later. This file is then removed.
You should not mirror the archive during this period. If you find this
file on a Debian mirror please have a nice talk with the admin. They
are doing something wrong.
EOF
}
# notify merkel's QA user, part one
function merkel1() {
log "Telling merkels QA user that we start dinstall"
ssh -2 -i ~dak/.ssh/push_merkel_qa -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 qa@merkel.debian.org sleep 1
}
# Create the postgres dump files
function pgdump_pre() {
log "Creating pre-daily-cron-job backup of projectb database..."
pg_dump projectb > $base/backup/dump_pre_$(date +%Y.%m.%d-%H:%M:%S)
}
function pgdump_post() {
log "Creating post-daily-cron-job backup of projectb database..."
cd $base/backup
POSTDUMP=$(date +%Y.%m.%d-%H:%M:%S)
pg_dump projectb > $base/backup/dump_$POSTDUMP
pg_dumpall --globals-only > $base/backup/dumpall_$POSTDUMP
ln -sf $base/backup/dump_$POSTDUMP current
ln -sf $base/backup/dumpall_$POSTDUMP currentall
}
# Load the dak-dev projectb
function pgdakdev() {
cd $base/backup
echo "drop database projectb" | psql -p 5433 template1
cat currentall | psql -p 5433 template1
createdb -p 5433 -T template0 projectb
fgrep -v '\connect' current | psql -p 5433 projectb
}
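A cheap sanity check after pgdakdev has run is to query the restored copy on the dev cluster's port (5433, as above); any table known to exist in projectb will do, e.g. suite:

    # hypothetical smoke test: the restored dev copy answers queries
    psql -p 5433 projectb -A -t -c "SELECT count(*) FROM suite"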
# Updating various files
function updates() {
log "Updating Bugs docu, Mirror list and mailing-lists.txt"
cd $configdir
$scriptsdir/update-bugdoctxt
$scriptsdir/update-mirrorlists
$scriptsdir/update-mailingliststxt
$scriptsdir/update-pseudopackages.sh
}
# Process (oldstable)-proposed-updates "NEW" queue
function punew_do() {
cd "${queuedir}/${1}"
date -u -R >> REPORT
dak process-new -a -C COMMENTS >> REPORT || true
echo >> REPORT
}
function punew() {
log "Doing automated p-u-new processing"
punew_do "$1"
}
function opunew() {
log "Doing automated o-p-u-new processing"
punew_do "$1"
}
# The first i18n one, syncing new descriptions
function i18n1() {
log "Synchronizing i18n package descriptions"
# First sync their newest data
cd ${scriptdir}/i18nsync
rsync -aq --delete --delete-after ddtp-sync:/does/not/matter . || true
# Now check if we still know about the packages for which they created the files
# is the timestamp signed by us?
if gpgv --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg timestamp.gpg timestamp; then
# now read it. As it's signed by us we are sure the content is what we expect, no need
# to do more here. And we only test -d a directory on it anyway.
TSTAMP=$(cat timestamp)
# do we have the dir still?
if [ -d ${scriptdir}/i18n/${TSTAMP} ]; then
# Lets check!
if ${scriptsdir}/ddtp-i18n-check.sh . ${scriptdir}/i18n/${TSTAMP}; then
# Yay, worked, lets copy around
for dir in squeeze sid; do
if [ -d dists/${dir}/ ]; then
cd dists/${dir}/main/i18n
rsync -aq --delete --delete-after . ${ftpdir}/dists/${dir}/main/i18n/.
fi
cd ${scriptdir}/i18nsync
done
else
echo "ARRRR, bad guys, wrong files, ARRR"
echo "Arf, Arf, Arf, bad guys, wrong files, arf, arf, arf" | mail -s "Don't you kids take anything. I'm watching you. I've got eye implants in the back of my head." debian-l10n-devel@lists.alioth.debian.org
fi
else
echo "ARRRR, missing the timestamp ${TSTAMP} directory, not updating i18n, ARRR"
echo "Arf, Arf, Arf, missing the timestamp ${TSTAMP} directory, not updating i18n, arf, arf, arf" | mail -s "Lisa, if you don't like your job you don't strike. You just go in every day and do it really half-assed. That's the American way." debian-l10n-devel@lists.alioth.debian.org
fi
else
echo "ARRRRRRR, could not verify our timestamp signature, ARRR. Don't mess with our files, i18n guys, ARRRRR."
echo "Arf, Arf, Arf, could not verify our timestamp signature, arf. Don't mess with our files, i18n guys, arf, arf, arf" | mail -s "You can't keep blaming yourself. Just blame yourself once, and move on." debian-l10n-devel@lists.alioth.debian.org
fi
}
# Process the accepted queue
function accepted() {
log "Processing queue/accepted"
rm -f "$accepted/REPORT"
dak process-accepted -pa -d "$accepted" > "$accepted/REPORT"
cat "$accepted/REPORT" | mail -s "Install for $(date +"%D - %R")" ftpmaster@ftp-master.debian.org
chgrp debadmin "$accepted/REPORT"
chmod 664 "$accepted/REPORT"
}
function cruft() {
log "Checking for cruft in overrides"
dak check-overrides
}
function msfl() {
log "Generating suite file lists for apt-ftparchive"
dak make-suite-file-list
}
function fingerprints() {
log "Updating fingerprints"
dak import-keyring -L /srv/keyring.debian.org/keyrings/debian-keyring.gpg
OUTFILE=$(mktemp)
dak import-keyring --generate-users "%s" /srv/keyring.debian.org/keyrings/debian-maintainers.gpg >"${OUTFILE}"
if [ -s "${OUTFILE}" ]; then
/usr/sbin/sendmail -odq -oi -t -f envelope@ftp-master.debian.org <<EOF
From: Debian FTP Masters <ftpmaster@ftp-master.debian.org>
To: <debian-project@lists.debian.org>
Subject: Debian Maintainers Keyring changes
Content-Type: text/plain; charset=utf-8
MIME-Version: 1.0
The following changes to the debian-maintainers keyring have just been activated:
$(cat $OUTFILE)
Debian distribution maintenance software,
on behalf of the Keyring maintainers
EOF
fi
rm -f "$OUTFILE"
}
function overrides() {
log "Writing overrides into text files"
cd $overridedir
dak make-overrides
# FIXME
rm -f override.sid.all3
for i in main contrib non-free main.debian-installer; do cat override.sid.$i >> override.sid.all3; done
}
function mpfm() {
log "Generating package / file mapping"
dak make-pkg-file-mapping | bzip2 -9 > $base/ftp/indices/package-file.map.bz2
}
function packages() {
log "Generating Packages and Sources files"
cd $configdir
GZIP='--rsyncable' ; export GZIP
apt-ftparchive generate apt.conf
}
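apt-ftparchive reads its generate targets from apt.conf, and exporting GZIP='--rsyncable' makes the compressed indices rsync-friendly. A minimal sketch of an equivalent invocation; the paths, Tree layout and FileList pattern are illustrative assumptions, not the archive's real apt.conf:

    cat > apt.conf.example <<'EOF'
    Dir { ArchiveDir "/srv/example/ftp"; };
    TreeDefault {
      // hypothetical: feed apt-ftparchive precomputed per-suite file lists
      FileList "/srv/example/lists/$(DIST)_$(SECTION)_binary-$(ARCH).list";
    };
    Tree "dists/unstable" {
      Sections "main contrib non-free";
      Architectures "amd64 source";
    };
    EOF
    GZIP='--rsyncable' apt-ftparchive generate apt.conf.example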
function pdiff() {
log "Generating pdiff files"
dak generate-index-diffs
}
function release() {
log "Generating Release files"
dak generate-releases
}
function dakcleanup() {
log "Cleanup old packages/files"
dak clean-suites -m 10000
dak clean-queues
}
function buildd() {
# Needs to be rebuilt, as files have moved. Due to unaccepts, we need to
# update this before wanna-build is updated.
log "Regenerating wanna-build/buildd information"
psql projectb -A -t -q -c "SELECT filename FROM queue_build WHERE suite = 5 AND queue = 0 AND in_queue = true AND filename ~ 'd(sc|eb)$'" > $dbdir/dists/unstable_accepted.list
symlinks -d /srv/incoming.debian.org/buildd > /dev/null
apt-ftparchive generate apt.conf.buildd
}
function buildd_dir() {
# Rebuild the buildd dir to avoid long periods of 403s
log "Regenerating the buildd incoming dir"
STAMP=$(date "+%Y%m%d%H%M")
make_buildd_dir
}
function scripts() {
log "Running various scripts from $scriptsdir"
cd $scriptsdir
./mkmaintainers
./copyoverrides
./mklslar
./mkfilesindices
./mkchecksums
}
function mirror() {
echo "Regenerating \"public\" mirror/ hardlink fun"
cd ${mirrordir}
rsync -aH --link-dest ${ftpdir} --exclude Archive_Maintenance_In_Progress --delete --delete-after --ignore-errors ${ftpdir}/. .
}
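Thanks to --link-dest, every file that did not change is created as a hardlink into ${ftpdir}, so the second tree costs almost no disk. A hypothetical way to confirm that (README stands in for any mirrored file):

    # a hard-link count above 1 means both trees share the same inode
    stat --format='%h %n' ${ftpdir}/README ${mirrordir}/README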
function wb() {
log "Trigger daily wanna-build run"
ssh -o BatchMode=yes -o SetupTimeOut=90 -o ConnectTimeout=90 wbadm@buildd /org/wanna-build/trigger.daily || echo "W-B trigger.daily failed" | mail -s "W-B Daily trigger failed" ftpmaster@ftp-master.debian.org
}
function expire() {
log "Expiring old database dumps..."
cd $base/backup
$scriptsdir/expire_dumps -d . -p -f "dump_*"
}
function transitionsclean() {
log "Removing out of date transitions..."
cd $base
dak transitions -c -a
}
function reports() {
# Send a report on NEW/BYHAND packages
log "Nagging ftpteam about NEW/BYHAND packages"
dak queue-report | mail -e -s "NEW and BYHAND on $(date +%D)" ftpmaster@ftp-master.debian.org
# and one on crufty packages
log "Sending information about crufty packages"
dak cruft-report > $webdir/cruft-report-daily.txt
dak cruft-report -s experimental >> $webdir/cruft-report-daily.txt
cat $webdir/cruft-report-daily.txt | mail -e -s "Debian archive cruft report for $(date +%D)" ftpmaster@ftp-master.debian.org
}
function dm() {
log "Updating DM html page"
$scriptsdir/dm-monitor >$webdir/dm-uploaders.html
}
function bts() {
log "Categorizing uncategorized bugs filed against ftp.debian.org"
dak bts-categorize
}
function merkel2() {
# Push dak@merkel so it syncs the projectb there. Returns immediately, the sync runs detached
log "Trigger merkel/flotows projectb sync"
ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_projectb dak@merkel.debian.org sleep 1
# Also trigger flotow, the ftpmaster test box
ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_flotow_projectb dak@flotow.debconf.org sleep 1
}
function merkel3() {
# Push dak@merkel to tell it to sync the dd accessible parts. Returns immediately, the sync runs detached
log "Trigger merkels dd accessible parts sync"
ssh -2 -o BatchMode=yes -o SetupTimeOut=30 -o ConnectTimeout=30 -i ~/.ssh/push_merkel_ddaccess dak@merkel.debian.org sleep 1
}
function mirrorpush() {
log "Starting the mirrorpush"
date -u > /srv/ftp.debian.org/web/mirrorstart
echo "Using dak v1" >> /srv/ftp.debian.org/web/mirrorstart
echo "Running on host $(hostname -f)" >> /srv/ftp.debian.org/web/mirrorstart
sudo -H -u archvsync /home/archvsync/runmirrors > ~dak/runmirrors.log 2>&1 &
}
function i18n2() {
log "Exporting package data foo for i18n project"
STAMP=$(date "+%Y%m%d%H%M")
mkdir -p ${scriptdir}/i18n/${STAMP}
cd ${scriptdir}/i18n/${STAMP}
dak control-suite -l stable > lenny
dak control-suite -l testing > squeeze
dak control-suite -l unstable > sid
echo "${STAMP}" > timestamp
gpg --secret-keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/secring.gpg --keyring /srv/ftp.debian.org/s3kr1t/dot-gnupg/pubring.gpg --no-options --batch --no-tty --armour --default-key 55BE302B --detach-sign -o timestamp.gpg timestamp
rm -f md5sum
md5sum * > md5sum
cd ${webdir}/
ln -sfT ${scriptdir}/i18n/${STAMP} i18n
cd ${scriptdir}
find ./i18n -mindepth 1 -maxdepth 1 -mtime +2 -not -name "${STAMP}" -type d -print0 | xargs --no-run-if-empty -0 rm -rf
}
function stats() {
log "Updating stats data"
cd $configdir
$scriptsdir/update-ftpstats $base/log/* > $base/misc/ftpstats.data
R --slave --vanilla < $base/misc/ftpstats.R
dak stats arch-space > $webdir/arch-space
dak stats pkg-nums > $webdir/pkg-nums
}
function aptftpcleanup() {
log "Clean up apt-ftparchive's databases"
cd $configdir
apt-ftparchive -q clean apt.conf
}
function compress() {
log "Compress old psql backups"
cd $base/backup/
find -maxdepth 1 -mindepth 1 -type f -name 'dump_pre_*' -mtime +2 -print0 | xargs -0 --no-run-if-empty rm
find -maxdepth 1 -mindepth 1 -type f -name 'dump_*' \! -name '*.bz2' \! -name '*.gz' -mmin +720 |
while read dumpname; do
echo "Compressing $dumpname"
bzip2 -9fv "$dumpname"
done
find -maxdepth 1 -mindepth 1 -type f -name "dumpall_*" \! -name '*.bz2' \! -name '*.gz' -mmin +720 |
while read dumpname; do
echo "Compressing $dumpname"
bzip2 -9fv "$dumpname"
done
finddup -l -d $base/backup
}
function logstats() {
$masterdir/tools/logs.py "$1"
}
# save timestamp when we start
function savetimestamp() {
NOW=$(date "+%Y.%m.%d-%H:%M:%S")
echo ${NOW} > "${dbdir}/dinstallstart"
}
function maillogfile() {
cat "$LOGFILE" | mail -s "Log for dinstall run of ${NOW}" cron@ftp-master.debian.org
}
function renamelogfile() {
if [ -f "${dbdir}/dinstallstart" ]; then
NOW=$(cat "${dbdir}/dinstallstart")
# maillogfile
mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
logstats "$logdir/dinstall_${NOW}.log"
bzip2 -9 "$logdir/dinstall_${NOW}.log"
else
error "Problem, I don't know when dinstall started, unable to do log statistics."
NOW=$(date "+%Y.%m.%d-%H:%M:%S")
# maillogfile
mv "$LOGFILE" "$logdir/dinstall_${NOW}.log"
bzip2 -9 "$logdir/dinstall_${NOW}.log"
fi
}
function testingsourcelist() {
dak ls -s testing -f heidi -r . | egrep 'source$' > ${webdir}/testing.list
}
# do a last run of process-unchecked before dinstall takes over.
function process_unchecked() {
log "Processing the unchecked queue"
acceptnew
UNCHECKED_WITHOUT_LOCK="-p"
do_unchecked
sync_debbugs
}
# source the dinstall functions
. "${configdir}/dinstall.functions"
########################################################################
########################################################################
@@ -521,6 +112,9 @@ function stage() {
# Make sure we are always at the same place.
cd ${configdir}
# We always use the same umask. If a function wants to do different, fine, but we reset.
umask 022
touch "${STAGEFILE}"
if [ -n "${TIME}" ]; then
@@ -548,53 +142,19 @@ LOGFILE="$logdir/dinstall.log"
exec >> "$LOGFILE" 2>&1
# usually we are not using debug logs. Set to 1 if you want them.
DEBUG=0
# our name
PROGRAM="dinstall"
# where do we want mails to go? For example log entries made with error()
if [ "x$(hostname -s)x" != "xriesx" ]; then
# Not our ftpmaster host
MAILTO=${MAILTO:-"root"}
else
# Yay, ftpmaster
MAILTO=${MAILTO:-"ftpmaster@debian.org"}
fi
# And now source our default config
. "${configdir}/dinstall.variables"
# How many logfiles to keep
LOGROTATE=${LOGROTATE:-400}
# Make sure we start out with a sane umask setting
umask 022
# Marker for dinstall start
DINSTALLSTART="${lockdir}/dinstallstart"
# Marker for dinstall end
DINSTALLEND="${lockdir}/dinstallend"
# And use one locale, no matter what the caller has set
export LANG=C
export LC_ALL=C
touch "${DINSTALLSTART}"
ts "startup"
# Tell everyone we are doing some work
NOTICE="$ftpdir/Archive_Maintenance_In_Progress"
# lock cron.unchecked (it immediately exits when this exists)
LOCK_DAILY="$lockdir/daily.lock"
# Lock cron.unchecked from doing work
LOCK_ACCEPTED="$lockdir/unchecked.lock"
# Lock process-new from doing work
LOCK_NEW="$lockdir/processnew.lock"
# This file is simply used to indicate to britney whether or not
# the Packages file updates completed successfully. It's not a lock
# from our point of view
LOCK_BRITNEY="$lockdir/britney.lock"
# If this file exists we exit immediately after the currently running
# function is done
LOCK_STOP="$lockdir/archive.stop"
lockfile -l 3600 "${LOCK_DAILY}"
trap onerror ERR
trap cleanup EXIT TERM HUP INT QUIT
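As the comment above onerror() says, a failing command runs onerror first (via the ERR trap) and cleanup afterwards (via the EXIT trap). A tiny self-contained bash demo of that layering, not part of this commit:

    set -eE                 # -e: exit on error; -E: ERR trap fires in functions too
    trap 'echo "onerror: a command failed"' ERR
    trap 'echo "cleanup: removing lock files"' EXIT
    false                   # prints the onerror line, then the cleanup line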
@@ -609,14 +169,6 @@ GO=(
)
stage $GO
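Each GO array hands stage() a set of KEY=VALUE strings (FUNC, TIME, ARGS, ERR). A simplified, self-contained sketch of that pattern, assuming stage markers live in a local stagedir and omitting the real error handling (the actual stage() is in the truncated hunk above):

    function stage() {
        local "$@"                        # FUNC/TIME/ARGS/ERR become locals
        mkdir -p stagedir                 # hypothetical marker directory
        local STAGEFILE="stagedir/${FUNC}"
        if [ -f "${STAGEFILE}" ]; then    # stage already ran on an earlier pass
            return 0
        fi
        ${FUNC} ${ARGS}
        touch "${STAGEFILE}"
        if [ -n "${TIME}" ]; then
            ts "${TIME}"                  # timestamp hook defined above
        fi
    }

    GO=( FUNC="cruft" TIME="cruft" ARGS="" ERR="" )
    stage "${GO[@]}"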
-GO=(
-FUNC="notice"
-TIME=""
-ARGS=""
-ERR="false"
-)
-stage $GO
GO=(
FUNC="merkel1"
TIME="init"
@@ -647,7 +199,8 @@ GO=(
ARGS="p-u-new"
ERR=""
)
-stage $GO
+### TODO: policy-new
+#stage $GO
GO=(
FUNC="opunew"
@@ -655,7 +208,8 @@ GO=(
ARGS="o-p-u-new"
ERR=""
)
-stage $GO
+### TODO: policy-new
+#stage $GO
GO=(
FUNC="i18n1"
@@ -670,29 +224,12 @@ lockfile "$LOCK_NEW"
GO=(
FUNC="process_unchecked"
TIME=""
TIME="unchecked"
ARGS=""
ERR=""
)
stage $GO
GO=(
FUNC="accepted"
TIME="accepted"
ARGS=""
ERR=""
)
stage $GO
GO=(
FUNC="buildd_dir"
TIME="buildd_dir"
ARGS=""
ERR="false"
)
stage $GO
GO=(
FUNC="cruft"
TIME="cruft"
@@ -766,7 +303,8 @@ GO=(
ARGS=""
ERR=""
)
-stage $GO
+### TODO: clean-* fixup
+#stage $GO
GO=(
FUNC="buildd"
@@ -800,7 +338,6 @@ GO=(
)
stage $GO &
rm -f "${NOTICE}"
rm -f "${LOCK_DAILY}"
ts "locked part finished"
......
@@ -59,7 +59,7 @@ cleanup() {
function do_buildd () {
if lockfile -r3 $NOTICE; then
LOCKDAILY="YES"
-psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id = build_queue_files.build_queue_id) WHERE queue_name = 'accepted' AND filename ~ 'd(sc|eb)$';" > $dbdir/dists/unstable_accepted.list
+psql projectb -A -t -q -c "SELECT build_queue.path || '/' || build_queue_files.filename FROM build_queue_files LEFT JOIN build_queue ON (build_queue.id = build_queue_files.build_queue_id) WHERE queue_name = 'buildd' AND filename ~ 'd(sc|eb)$';" > $dbdir/dists/unstable_accepted.list
cd $overridedir
dak make-overrides &>/dev/null
rm -f override.sid.all3 override.sid.all3.src
......
@@ -36,7 +36,7 @@ echo "Splitting queue/done"
dak split-done > /dev/null
# Vacuum the database
echo "VACUUM; VACUUM ANALYZE;" | psql --no-psqlrc projectb 2>&1 | grep -v "^NOTICE: Skipping.*only table owner can VACUUM it$"
echo "VACUUM; VACUUM ANALYZE;" | psql --no-psqlrc projectb 2>&1
# Do git cleanup stuff
echo "Doing git stuff"
......
This diff is collapsed.
# usually we are not using debug logs. Set to 1 if you want them.
DEBUG=0
# our name
PROGRAM="dinstall"
# where do we want mails to go? For example log entries made with error()
if [ "x$(hostname -s)x" != "xriesx" ]; then
# Not our ftpmaster host
MAILTO=${MAILTO:-"root"}
else
# Yay, ftpmaster
MAILTO=${MAILTO:-"ftpmaster@debian.org"}
fi
# How many logfiles to keep
LOGROTATE=${LOGROTATE:-400}
# Marker for dinstall start
DINSTALLSTART="${lockdir}/dinstallstart"
# Marker for dinstall end
DINSTALLEND="${lockdir}/dinstallend"
# lock cron.unchecked (it immediately exits when this exists)
LOCK_DAILY="$lockdir/daily.lock"
# Lock cron.unchecked from doing work
LOCK_ACCEPTED="$lockdir/unchecked.lock"
# Lock process-new from doing work
LOCK_NEW="$lockdir/processnew.lock"
# This file is simply used to indicate to britney whether or not
# the Packages file updates completed successfully. It's not a lock
# from our point of view
LOCK_BRITNEY="$lockdir/britney.lock"
# If this file exists we exit immediately after the currently running
# function is done
LOCK_STOP="$lockdir/archive.stop"
@@ -11,11 +11,26 @@ lintian:
- usr-share-doc-symlink-without-dependency
- mknod-in-maintainer-script
- package-contains-info-dir-file
- copyright-lists-upstream-authors-with-dh_make-boilerplate
- binary-or-shlib-defines-rpath
- non-etc-file-marked-as-conffile
- embedded-zlib
- no-shlibs-control-file
- copyright-contains-dh_make-todo-boilerplate
- preinst-interpreter-without-predepends
- control-interpreter-without-depends
fatal:
- debian-control-file-uses-obsolete-national-encoding
- malformed-deb-archive
- bad-package-name
- no-architecture-field
- package-contains-ancient-file
- forbidden-postrm-interpreter
- control-interpreter-in-usr-local
- package-uses-local-diversion
- wrong-file-owner-uid-or-gid
- bad-relation
- FSSTND-dir-in-usr
- FSSTND-dir-in-var
- binary-in-etc
- missing-dependency-on-perlapi
- section-is-dh_make-template
@@ -61,7 +76,6 @@ lintian:
- uploader-address-is-on-localhost
- no-source-field
- source-field-does-not-match-pkg-name
- section-is-dh_make-template
- build-depends-on-essential-package-without-using-version
- depends-on-build-essential-package-without-using-version
- build-depends-on-build-essential
......
@@ -33,6 +33,7 @@ G{importgraph}
################################################################################
import os
import sys
+import traceback
import daklib.utils
@@ -73,6 +74,8 @@ def init():
"Generate lists of packages per suite for apt-ftparchive"),
("make-pkg-file-mapping",
"Generate package <-> file mapping"),
("generate-filelist",
"Generate file lists for apt-ftparchive"),
("generate-releases",
"Generate Release files"),
("contents",
......
#!/usr/bin/env python
"""
Add view for new generate_filelist command.
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2009 Torsten Werner <twerner@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import psycopg2

from daklib.dak_exceptions import DBUpdateError
def do_update(self):
print "Add views for generate_filelist to database."
try:
c = self.db.cursor()
print "Drop old views."
c.execute("DROP VIEW IF EXISTS binfiles_suite_component_arch CASCADE")
c.execute("DROP VIEW IF EXISTS srcfiles_suite_component CASCADE")
print "Create new views."
c.execute("""
CREATE VIEW binfiles_suite_component_arch AS
SELECT files.filename, binaries.type, location.path, location.component,
bin_associations.suite, binaries.architecture
FROM binaries
JOIN bin_associations ON binaries.id = bin_associations.bin
JOIN files ON binaries.file = files.id
JOIN location ON files.location = location.id;
""")
c.execute("""
CREATE VIEW srcfiles_suite_component AS
SELECT files.filename, location.path, location.component,
src_associations.suite
FROM source
JOIN src_associations ON source.id = src_associations.source
JOIN files ON source.file = files.id
JOIN location ON files.location = location.id;
""")
print "Committing"
c.execute("UPDATE config SET value = '23' WHERE name = 'db_revision'")
self.db.commit()
except psycopg2.InternalError, msg:
self.db.rollback()
raise DBUpdateError, "Database error, rollback issued. Error message : %s" % (str(msg))
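Once the update has run, the new views can be smoke-tested from psql; the concatenation mirrors what generate-filelist's fetch() does with path and filename (suite id 5 is an example value):

    psql projectb -At -c "SELECT path || filename FROM srcfiles_suite_component WHERE suite = 5 LIMIT 3"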
#!/usr/bin/python
"""
Generate file lists for apt-ftparchive.
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2009 Torsten Werner <twerner@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from daklib.dbconn import *
from daklib.config import Config
from daklib import utils
import apt_pkg, os, sys
def fetch(query, args, session):
return [path + filename for (path, filename) in \
session.execute(query, args).fetchall()]
def getSources(suite, component, session):
query = """
SELECT path, filename
FROM srcfiles_suite_component
WHERE suite = :suite AND component = :component
"""
args = { 'suite': suite.suite_id,
'component': component.component_id }
return fetch(query, args, session)
def getBinaries(suite, component, architecture, type, session):
query = """
SELECT path, filename
FROM binfiles_suite_component_arch
WHERE suite = :suite AND component = :component AND type = :type AND
(architecture = :architecture OR architecture = 2)
"""
args = { 'suite': suite.suite_id,
'component': component.component_id,
'architecture': architecture.arch_id,
'type': type }
return fetch(query, args, session)
def listPath(suite, component, architecture = None, type = None):
"""returns full path to the list file"""
suffixMap = { 'deb': "binary-",
'udeb': "debian-installer_binary-" }
if architecture:
suffix = suffixMap[type] + architecture.arch_string
else:
suffix = "source"
filename = "%s_%s_%s.list" % \
(suite.suite_name, component.component_name, suffix)
pathname = os.path.join(Config()["Dir::Lists"], filename)
return utils.open_file(pathname, "w")
def writeSourceList(suite, component, session):
file = listPath(suite, component)
for filename in getSources(suite, component, session):
file.write(filename + '\n')
file.close()
def writeBinaryList(suite, component, architecture, type, session):
file = listPath(suite, component, architecture, type)
for filename in getBinaries(suite, component, architecture, type, session):
file.write(filename + '\n')
file.close()
def usage():
print """Usage: dak generate_filelist [OPTIONS]
Create filename lists for apt-ftparchive.
-s, --suite=SUITE act on this suite
-c, --component=COMPONENT act on this component
-a, --architecture=ARCH act on this architecture
-h, --help show this help and exit
ARCH, COMPONENT and SUITE can be comma (or space) separated lists, e.g.
--suite=testing,unstable"""
sys.exit()
def main():
cnf = Config()
Arguments = [('h', "help", "Filelist::Options::Help"),
('s', "suite", "Filelist::Options::Suite", "HasArg"),
('c', "component", "Filelist::Options::Component", "HasArg"),
('a', "architecture", "Filelist::Options::Architecture", "HasArg")]
query_suites = DBConn().session().query(Suite)
suites = [suite.suite_name for suite in query_suites.all()]
if not cnf.has_key('Filelist::Options::Suite'):
cnf['Filelist::Options::Suite'] = ','.join(suites)
# we can ask the database for components if 'mixed' is gone
if not cnf.has_key('Filelist::Options::Component'):
cnf['Filelist::Options::Component'] = 'main,contrib,non-free'
query_architectures = DBConn().session().query(Architecture)
architectures = \
[architecture.arch_string for architecture in query_architectures.all()]
if not cnf.has_key('Filelist::Options::Architecture'):
cnf['Filelist::Options::Architecture'] = ','.join(architectures)
cnf['Filelist::Options::Help'] = ''
apt_pkg.ParseCommandLine(cnf.Cnf, Arguments, sys.argv)
Options = cnf.SubTree("Filelist::Options")
if Options['Help']:
usage()
session = DBConn().session()
suite_arch = session.query(SuiteArchitecture)
for suite_name in utils.split_args(Options['Suite']):
suite = query_suites.filter_by(suite_name = suite_name).one()
join = suite_arch.filter_by(suite_id = suite.suite_id)
for component_name in utils.split_args(Options['Component']):
component = session.query(Component).\
filter_by(component_name = component_name).one()
for architecture_name in utils.split_args(Options['Architecture']):
architecture = query_architectures.\
filter_by(arch_string = architecture_name).one()
try:
join.filter_by(arch_id = architecture.arch_id).one()
if architecture_name == 'source':
writeSourceList(suite, component, session)
elif architecture_name != 'all':
writeBinaryList(suite, component, architecture, 'deb', session)
writeBinaryList(suite, component, architecture, 'udeb', session)
except:
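# suite/component/architecture combination not in the database: skip it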
pass
# this script doesn't change the database
session.rollback()
if __name__ == '__main__':
main()
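For reference, a typical invocation of the new command registered in dak.py above (all values are examples); each run writes <suite>_<component>_<suffix>.list files into Dir::Lists, as listPath() shows:

    dak generate-filelist -s unstable -c main -a amd64
    # several values may be given, comma separated:
    dak generate-filelist --suite=testing,unstable --architecture=source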
@@ -91,7 +91,7 @@ def main ():
session = DBConn().session()
# If cron.daily is running; warn the user that our output might seem strange
-if os.path.exists(os.path.join(cnf["Dir::Root"], "Archive_Maintenance_In_Progress")):
+if os.path.exists(os.path.join(cnf["Dir::Lock"], "daily.lock")):
utils.warn("Archive maintenance is in progress; database inconsistencies are possible.")
# Handle buildd maintenance helper options
......
@@ -25,7 +25,7 @@ import apt_pkg, os, sys, pwd, time, commands
from daklib import queue
from daklib import daklog
from daklib import utils
-from daklib.dbconn import DBConn, get_or_set_queue, get_suite_architectures
+from daklib.dbconn import DBConn, get_build_queue, get_suite_architectures
from daklib.regexes import re_taint_free
Cnf = None
@@ -495,8 +495,8 @@ def _do_Disembargo():
session = DBConn().session()
dest = Cnf["Dir::Queue::Unembargoed"]
-emb_q = get_or_set_queue("embargoed", session)
-une_q = get_or_set_queue("unembargoed", session)
+emb_q = get_build_queue("embargoed", session)
+une_q = get_build_queue("unembargoed", session)
for c in changes:
print "Disembargoing %s" % (c)
......
@@ -898,6 +898,10 @@ def end():
def main():
global Options, Logger, Sections, Priorities
print "NO NEW PROCESSING CURRENTLY AVAILABLE"
print "(Go and do something more interesting)"
sys.exit(0)
cnf = Config()
session = DBConn().session()
......
@@ -291,7 +291,7 @@ def action(u, session):
elif answer == 'P':
if not chg:
chg = u.pkg.add_known_changes(holding.holding_dir, session)
-package_to_queue(u, summary, short_summary, policy_queue, chg, session)
+package_to_queue(u, summary, short_summary, policyqueue, chg, session)
session.commit()
u.remove()
elif answer == queuekey:
@@ -477,8 +477,8 @@ def main():
sets = "set"
if summarystats.accept_count > 1:
sets = "sets"
-sys.stderr.write("Installed %d package %s, %s.\n" % (summarystats.accept_count, sets,
-                 utils.size_type(int(summarystats.accept_bytes))))
+print "Installed %d package %s, %s." % (summarystats.accept_count, sets,
+    utils.size_type(int(summarystats.accept_bytes)))
Logger.log(["total", summarystats.accept_count, summarystats.accept_bytes])
if not Options["No-Action"]:
......
@@ -45,7 +45,7 @@ from daklib.dak_exceptions import DBUpdateError
################################################################################
Cnf = None
-required_database_schema = 22
+required_database_schema = 23
################################################################################
......
@@ -434,7 +434,7 @@ class BuildQueue(object):
pass
def __repr__(self):
-return '<Queue %s>' % self.queue_name
+return '<BuildQueue %s>' % self.queue_name
def add_file_from_pool(self, poolfile):
"""Copies a file into the pool. Assumes that the PoolFile object is
@@ -450,7 +450,7 @@ class BuildQueue(object):
# In this case, update the BuildQueueFile entry so we
# don't remove it too early
f.lastused = datetime.now()
-DBConn().session().object_session(pf).add(f)
+DBConn().session().object_session(poolfile).add(f)
return f
# Prepare BuildQueueFile object
@@ -2062,6 +2062,11 @@ def add_dsc_to_db(u, filename, session=None):
poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
pfs.append(poolfile)
files_id = poolfile.file_id
+else:
+poolfile = get_poolfile_by_id(files_id, session)
+if poolfile is None:
+utils.fubar("INTERNAL ERROR. Found no poolfile with id %d" % files_id)
+pfs.append(poolfile)
df.poolfile_id = files_id
session.add(df)
......
#! /bin/sh
set -e
. $SCRIPTVARS
echo 'Copying override files into public view ...'
for f in $copyoverrides ; do
cd $overridedir
chmod g+w override.$f
cd $indices
rm -f .newover-$f.gz
pc="`gzip 2>&1 -9nv <$overridedir/override.$f >.newover-$f.gz`"
set +e
nf=override.$f.gz
cmp -s .newover-$f.gz $nf
rc=$?
set -e
if [ $rc = 0 ]; then
rm -f .newover-$f.gz
elif [ $rc = 1 -o ! -f $nf ]; then
echo " installing new $nf $pc"
mv -f .newover-$f.gz $nf
chmod g+w $nf
else
echo $rc $pc
exit 1
fi
done
#!/bin/sh
# Update the md5sums file
set -e
. $SCRIPTVARS
dsynclist=$dbdir/dsync.list
md5list=$indices/md5sums
echo -n "Creating md5 / dsync index file ... "
cd "$ftpdir"
${bindir}/dsync-flist -q generate $dsynclist --exclude $dsynclist --md5
${bindir}/dsync-flist -q md5sums $dsynclist | gzip -9n > ${md5list}.gz
${bindir}/dsync-flist -q link-dups $dsynclist || true
#!/bin/sh -e
export SCRIPTVARS=/srv/ftp.debian.org/dak/config/debian/vars
. $SCRIPTVARS
umask 002
cd $base/ftp/indices/files/components
ARCHLIST=$(tempfile)
echo "Querying projectb..."
echo 'SELECT l.path, f.filename, a.arch_string FROM location l JOIN files f ON (f.location = l.id) LEFT OUTER JOIN (binaries b JOIN architecture a ON (b.architecture = a.id)) ON (f.id = b.file)' | psql projectb -At | sed 's/|//;s,^/srv/ftp.debian.org/ftp,.,' | sort >$ARCHLIST
includedirs () {
perl -ne 'print; while (m,/[^/]+$,) { $_=$`; print $_ . "\n" unless $d{$_}++; }'
}
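includedirs echoes each path followed by all of its not-yet-seen parent directories, so the generated lists also contain the directory entries a mirroring tool needs. For example:

    echo ./dists/sid/main/binary-amd64/Packages.gz | includedirs
    # ./dists/sid/main/binary-amd64/Packages.gz
    # ./dists/sid/main/binary-amd64
    # ./dists/sid/main
    # ./dists/sid
    # ./dists
    # .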
poolfirst () {
perl -e '@nonpool=(); while (<>) { if (m,^\./pool/,) { print; } else { push @nonpool, $_; } } print for (@nonpool);'
}
echo "Generating sources list..."
(
sed -n 's/|$//p' $ARCHLIST
cd $base/ftp
find ./dists -maxdepth 1 \! -type d
find ./dists \! -type d | grep "/source/"
) | sort -u | gzip --rsyncable -9 > source.list.gz
echo "Generating arch lists..."
ARCHES=$( (<$ARCHLIST sed -n 's/^.*|//p'; echo amd64) | grep . | grep -v all | sort -u)
for a in $ARCHES; do
(sed -n "s/|$a$//p" $ARCHLIST
sed -n 's/|all$//p' $ARCHLIST
cd $base/ftp
find ./dists -maxdepth 1 \! -type d
find ./dists \! -type d | grep -E "(proposed-updates.*_$a.changes$|/main/disks-$a/|/main/installer-$a/|/Contents-$a|/binary-$a/)"
) | sort -u | gzip --rsyncable -9 > arch-$a.list.gz
done
echo "Generating suite lists..."
suite_list () {
printf 'SELECT DISTINCT l.path, f.filename FROM (SELECT sa.source AS source FROM src_associations sa WHERE sa.suite = %d UNION SELECT b.source AS source FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) WHERE ba.suite = %d) s JOIN dsc_files df ON (s.source = df.source) JOIN files f ON (df.file = f.id) JOIN location l ON (f.location = l.id)\n' $1 $1 | psql -F' ' -A -t projectb
printf 'SELECT l.path, f.filename FROM bin_associations ba JOIN binaries b ON (ba.bin = b.id) JOIN files f ON (b.file = f.id) JOIN location l ON (f.location = l.id) WHERE ba.suite = %d\n' $1 | psql -F' ' -A -t projectb
}
printf 'SELECT id, suite_name FROM suite\n' | psql -F' ' -At projectb |
while read id suite; do
[ -e $base/ftp/dists/$suite ] || continue
(
(cd $base/ftp
distname=$(cd dists; readlink $suite || echo $suite)
find ./dists/$distname \! -type d
for distdir in ./dists/*; do
[ "$(readlink $distdir)" != "$distname" ] || echo $distdir
done
)
suite_list $id | tr -d ' ' | sed 's,^/srv/ftp.debian.org/ftp,.,'
) | sort -u | gzip --rsyncable -9 > suite-${suite}.list.gz
done
echo "Finding everything on the ftp site to generate sundries $(date +"%X")..."
(cd $base/ftp; find . \! -type d \! -name 'Archive_Maintenance_In_Progress' | sort) >$ARCHLIST
rm -f sundries.list
zcat *.list.gz | cat - *.list | sort -u |
diff - $ARCHLIST | sed -n 's/^> //p' > sundries.list
echo "Generating files list $(date +"%X")..."
for a in $ARCHES; do
(echo ./project/trace; zcat arch-$a.list.gz source.list.gz) |
cat - sundries.list dists.list project.list docs.list indices.list |
sort -u | poolfirst > ../arch-$a.files
done
(cd $base/ftp/
for dist in sid squeeze; do
find ./dists/$dist/main/i18n/ \! -type d | sort -u | gzip --rsyncable -9 > $base/ftp/indices/files/components/translation-$dist.list.gz
done
)
(cat ../arch-i386.files ../arch-amd64.files; zcat suite-oldstable.list.gz suite-proposed-updates.list.gz ; zcat translation-sid.list.gz ; zcat translation-squeeze.list.gz) |
sort -u | poolfirst > ../typical.files
rm -f $ARCHLIST
echo "Done!"
#!/bin/sh
# Update the ls-lR.
set -e
. $SCRIPTVARS
cd $ftpdir
filename=ls-lR
echo "Removing any core files ..."
find -type f -name core -print0 | xargs -0r rm -v
echo "Checking permissions on files in the FTP tree ..."
find -type f \( \! -perm -444 -o -perm +002 \) -ls
find -type d \( \! -perm -555 -o -perm +002 \) -ls
echo "Checking symlinks ..."
symlinks -rd .
echo "Creating recursive directory listing ... "
rm -f .$filename.new
TZ=UTC ls -lR | grep -v Archive_Maintenance_In_Progress > .$filename.new
if [ -r ${filename}.gz ] ; then
mv -f ${filename}.gz $filename.old.gz
mv -f .$filename.new $filename
rm -f $filename.patch.gz
zcat $filename.old.gz | diff -u - $filename | gzip --rsyncable -9cfn - >$filename.patch.gz
rm -f $filename.old.gz
else
mv -f .$filename.new $filename
fi
gzip --rsyncable -9cfN $filename >$filename.gz
rm -f $filename
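Because both ls-lR.gz and ls-lR.patch.gz (a unified diff against the previous listing) are published, a mirror can bring its copy up to date without refetching the whole file. A hypothetical mirror-side sequence:

    zcat ls-lR.gz > ls-lR                   # the listing the mirror already has
    zcat ls-lR.patch.gz | patch -s ls-lR    # apply the published diff
    gzip -9nf ls-lR                         # store the updated listing again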
#! /bin/sh
echo
echo -n 'Creating Maintainers index ... '
set -e
. $SCRIPTVARS
cd $base/misc/
cd $indices
dak make-maintainers ${scriptdir}/masterfiles/pseudo-packages.maintainers | sed -e "s/~[^ ]*\([ ]\)/\1/" | awk '{printf "%-20s ", $1; for (i=2; i<=NF; i++) printf "%s ", $i; printf "\n";}' > .new-maintainers
set +e
cmp .new-maintainers Maintainers >/dev/null
rc=$?
set -e
if [ $rc = 1 ] || [ ! -f Maintainers ] ; then
echo -n "installing Maintainers ... "
mv -f .new-maintainers Maintainers
gzip --rsyncable -9v <Maintainers >.new-maintainers.gz
mv -f .new-maintainers.gz Maintainers.gz
elif [ $rc = 0 ] ; then
echo '(same as before)'
rm -f .new-maintainers
else
echo cmp returned $rc
false
fi
#!/usr/bin/env python
from base_test import DakTestCase, DAK_ROOT_DIR
import glob
import unittest
from os.path import join, basename, splitext
class ImportTestCase(DakTestCase):
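# The class body's locals() is the class namespace while it executes, so this
# loop turns every dak/*.py module into its own test_importing_* test case.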
for filename in glob.glob(join(DAK_ROOT_DIR, 'dak', '*.py')):
cmd, ext = splitext(basename(filename))
def test_fn(self, cmd=cmd):
__import__('dak', fromlist=[cmd])
locals()['test_importing_%s' % cmd] = test_fn
if __name__ == '__main__':
unittest.main()