Commit 8c687585 authored by Mark Hymers

Merge commit 'djpig/process-upload'

Also remove p-a and p-unchecked to avoid confusion

Conflicts:
	daklib/dbconn.py
	daklib/queue.py
Signed-off-by: Mark Hymers <mhy@debian.org>
@@ -66,10 +66,8 @@ def init():
("process-new",
"Process NEW and BYHAND packages"),
("process-unchecked",
("process-upload",
"Process packages in queue/unchecked"),
("process-accepted",
"Install packages into the pool"),
("make-suite-file-list",
"Generate lists of packages per suite for apt-ftparchive"),
......
This diff is collapsed.
@@ -177,198 +177,51 @@ class Changes(object):
return summary
@session_wrapper
def remove_known_changes(self, session=None):
if session is None:
session = DBConn().session()
privatetrans = True
session.delete(get_knownchange(self.changes_file, session))
if privatetrans:
session.commit()
session.close()
def mark_missing_fields(self):
"""add "missing" in fields which we will require for the known_changes table"""
for key in ['urgency', 'maintainer', 'fingerprint', 'changed-by' ]:
if (not self.changes.has_key(key)) or (not self.changes[key]):
self.changes[key]='missing'
@session_wrapper
def add_known_changes(self, dirpath, session=None):
"""add "missing" in fields which we will require for the known_changes table"""
cnf = Config()
privatetrans = False
if session is None:
session = DBConn().session()
privatetrans = True
changesfile = os.path.join(dirpath, self.changes_file)
filetime = datetime.datetime.fromtimestamp(os.path.getctime(changesfile))
self.mark_missing_fields()
multivalues = {}
for key in ("distribution", "architecture", "binary"):
if isinstance(self.changes[key], dict):
multivalues[key] = ", ".join(self.changes[key].keys())
else:
multivalues[key] = self.changes[key]
session.execute(
"""INSERT INTO known_changes
(changesname, seen, source, binaries, architecture, version,
distribution, urgency, maintainer, fingerprint, changedby, date)
VALUES (:changesfile,:filetime,:source,:binary, :architecture,
:version,:distribution,:urgency,:maintainer,:fingerprint,:changedby,:date)""",
{ 'changesfile':self.changes_file,
'filetime':filetime,
'source':self.changes["source"],
'binary':self.changes["binary"],
'architecture':self.changes["architecture"],
'version':self.changes["version"],
'distribution':self.changes["distribution"],
'urgency':self.changes["urgency"],
'maintainer':self.changes["maintainer"],
'fingerprint':self.changes["fingerprint"],
'changedby':self.changes["changed-by"],
'date':self.changes["date"]} )
if privatetrans:
session.commit()
session.close()
def load_dot_dak(self, changesfile):
"""
Update ourself by reading a previously created cPickle .dak dumpfile.
"""
self.changes_file = changesfile
dump_filename = self.changes_file[:-8]+".dak"
dump_file = open_file(dump_filename)
p = Unpickler(dump_file)
self.changes.update(p.load())
self.dsc.update(p.load())
self.files.update(p.load())
self.dsc_files.update(p.load())
next_obj = p.load()
if isinstance(next_obj, dict):
self.orig_files.update(next_obj)
else:
# Auto-convert old dak files to new format supporting
# multiple tarballs
orig_tar_gz = None
for dsc_file in self.dsc_files.keys():
if dsc_file.endswith(".orig.tar.gz"):
orig_tar_gz = dsc_file
self.orig_files[orig_tar_gz] = {}
if next_obj != None:
self.orig_files[orig_tar_gz]["id"] = next_obj
next_obj = p.load()
if next_obj != None and next_obj != "":
self.orig_files[orig_tar_gz]["location"] = next_obj
if len(self.orig_files[orig_tar_gz]) == 0:
del self.orig_files[orig_tar_gz]
dump_file.close()
def sanitised_files(self):
ret = {}
for name, entry in self.files.items():
ret[name] = {}
for i in CHANGESFIELDS_FILES:
if entry.has_key(i):
ret[name][i] = entry[i]
return ret
def sanitised_changes(self):
ret = {}
# Mandatory changes fields
for i in CHANGESFIELDS_MANDATORY:
ret[i] = self.changes[i]
# Optional changes fields
for i in CHANGESFIELDS_OPTIONAL:
if self.changes.has_key(i):
ret[i] = self.changes[i]
return ret
def sanitised_dsc(self):
ret = {}
for i in CHANGESFIELDS_DSC:
if self.dsc.has_key(i):
ret[i] = self.dsc[i]
return ret
def sanitised_dsc_files(self):
ret = {}
for name, entry in self.dsc_files.items():
ret[name] = {}
# Mandatory dsc_files fields
for i in CHANGESFIELDS_DSCFILES_MANDATORY:
ret[name][i] = entry[i]
# Optional dsc_files fields
for i in CHANGESFIELDS_DSCFILES_OPTIONAL:
if entry.has_key(i):
ret[name][i] = entry[i]
return ret
def sanitised_orig_files(self):
ret = {}
for name, entry in self.orig_files.items():
ret[name] = {}
# Optional orig_files fields
for i in CHANGESFIELDS_ORIGFILES:
if entry.has_key(i):
ret[name][i] = entry[i]
return ret
def write_dot_dak(self, dest_dir):
"""
Dump ourself into a cPickle file.
@type dest_dir: string
@param dest_dir: Path where the dumpfile should be stored
@note: This could just dump the dictionaries as is, but I'd like to avoid this so
there's some idea of what process-accepted & process-new use from
process-unchecked. (JT)
"""
dump_filename = os.path.join(dest_dir, self.changes_file[:-8] + ".dak")
dump_file = open_file(dump_filename, 'w')
try:
os.chmod(dump_filename, 0664)
except OSError, e:
# chmod may fail when the dumpfile is not owned by the user
# invoking dak (like e.g. when NEW is processed by a member
# of ftpteam)
if e.errno == EPERM:
perms = stat.S_IMODE(os.stat(dump_filename)[stat.ST_MODE])
# security precaution, should never happen unless a weird
# umask is set anywhere
if perms & stat.S_IWOTH:
fubar("%s is world writable and chmod failed." % \
(dump_filename,))
# ignore the failed chmod otherwise as the file should
# already have the right privileges and is just, at worst,
# unreadable for world
else:
raise
p = Pickler(dump_file, 1)
p.dump(self.sanitised_changes())
p.dump(self.sanitised_dsc())
p.dump(self.sanitised_files())
p.dump(self.sanitised_dsc_files())
p.dump(self.sanitised_orig_files())
dump_file.close()
{ 'changesfile': self.changes_file,
'filetime': filetime,
'source': self.changes["source"],
'binary': multivalues["binary"],
'architecture': multivalues["architecture"],
'version': self.changes["version"],
'distribution': multivalues["distribution"],
'urgency': self.changes["urgency"],
'maintainer': self.changes["maintainer"],
'fingerprint': self.changes["fingerprint"],
'changedby': self.changes["changed-by"],
'date': self.changes["date"]} )
def unknown_files_fields(self, name):
return sorted(list( set(self.files[name].keys()) -
......
@@ -37,7 +37,7 @@ import os
import re
import psycopg2
import traceback
import datetime
from datetime import datetime
from inspect import getargspec
@@ -125,6 +125,8 @@ def session_wrapper(fn):
return wrapped
__all__.append('session_wrapper')
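session_wrapper is the decorator the rest of this commit leans on: when the caller supplies no session it opens a private one and commits and closes it on the way out, so the same helper works standalone or inside a larger transaction. A minimal sketch of that pattern, reconstructed from the privatetrans logic visible above rather than copied from daklib:

def session_wrapper(fn):
    def wrapped(*args, **kwargs):
        private_transaction = False
        session = kwargs.get('session')
        if session is None:
            # No session supplied: open a private one and clean up afterwards
            private_transaction = True
            kwargs['session'] = session = DBConn().session()
        try:
            return fn(*args, **kwargs)
        finally:
            if private_transaction:
                session.commit()
                session.close()
    return wrapped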
################################################################################
class Architecture(object):
@@ -850,6 +852,39 @@ def get_poolfile_like_name(filename, session=None):
__all__.append('get_poolfile_like_name')
@session_wrapper
def add_poolfile(filename, datadict, location_id, session=None):
"""
Add a new file to the pool
@type filename: string
@param filename: filename
@type datadict: dict
@param datadict: dict with needed data
@type location_id: int
@param location_id: database id of the location
@rtype: PoolFile
@return: the PoolFile object created
"""
poolfile = PoolFile()
poolfile.filename = filename
poolfile.filesize = datadict["size"]
poolfile.md5sum = datadict["md5sum"]
poolfile.sha1sum = datadict["sha1sum"]
poolfile.sha256sum = datadict["sha256sum"]
poolfile.location_id = location_id
session.add(poolfile)
# Flush to get a file id (NB: This is not a commit)
session.flush()
return poolfile
__all__.append('add_poolfile')
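A usage sketch for add_poolfile(); the path, checksums and location id are made-up illustrations, not values from this commit:

datadict = {
    'size': 2893,
    'md5sum': 'd41d8cd98f00b204e9800998ecf8427e',
    'sha1sum': 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
    'sha256sum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
}
poolfile = add_poolfile('pool/main/e/example/example_1.0.dsc', datadict, 1)
# The id is usable immediately thanks to the session.flush() above, but
# nothing is committed until the caller (or session_wrapper) commits.
print poolfile.file_id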
################################################################################
class Fingerprint(object):
@@ -1895,6 +1930,174 @@ __all__.append('get_source_in_suite')
################################################################################
@session_wrapper
def add_dsc_to_db(u, filename, session=None):
entry = u.pkg.files[filename]
source = DBSource()
source.source = u.pkg.dsc["source"]
source.version = u.pkg.dsc["version"] # NB: not files[file]["version"], that has no epoch
source.maintainer_id = get_or_set_maintainer(u.pkg.dsc["maintainer"], session).maintainer_id
source.changedby_id = get_or_set_maintainer(u.pkg.changes["changed-by"], session).maintainer_id
source.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
source.install_date = datetime.now().date()
dsc_component = entry["component"]
dsc_location_id = entry["location id"]
source.dm_upload_allowed = (u.pkg.dsc.get("dm-upload-allowed", '') == "yes")
# Set up a new poolfile if necessary
if not entry.has_key("files id") or not entry["files id"]:
filename = entry["pool name"] + filename
poolfile = add_poolfile(filename, entry, dsc_location_id, session)
session.flush()
entry["files id"] = poolfile.file_id
source.poolfile_id = entry["files id"]
session.add(source)
session.flush()
for suite_name in u.pkg.changes["distribution"].keys():
sa = SrcAssociation()
sa.source_id = source.source_id
sa.suite_id = get_suite(suite_name).suite_id
session.add(sa)
session.flush()
# Add the source files to the DB (files and dsc_files)
dscfile = DSCFile()
dscfile.source_id = source.source_id
dscfile.poolfile_id = entry["files id"]
session.add(dscfile)
for dsc_file, dentry in u.pkg.dsc_files.items():
df = DSCFile()
df.source_id = source.source_id
# If the .orig tarball is already in the pool, its
# files id is stored in dsc_files by check_dsc().
files_id = dentry.get("files id", None)
# Find the entry in the files hash
# TODO: Bail out here properly
dfentry = None
for f, e in u.pkg.files.items():
if f == dsc_file:
dfentry = e
break
if files_id is None:
filename = dfentry["pool name"] + dsc_file
(found, obj) = check_poolfile(filename, dentry["size"], dentry["md5sum"], dsc_location_id)
# FIXME: needs to check for -1/-2 and/or handle exception
if found and obj is not None:
files_id = obj.file_id
# If still not found, add it
if files_id is None:
# HACK: Force sha1sum etc into dentry
dentry["sha1sum"] = dfentry["sha1sum"]
dentry["sha256sum"] = dfentry["sha256sum"]
poolfile = add_poolfile(filename, dentry, dsc_location_id, session)
files_id = poolfile.file_id
df.poolfile_id = files_id
session.add(df)
session.flush()
# Add the src_uploaders to the DB
uploader_ids = [source.maintainer_id]
if u.pkg.dsc.has_key("uploaders"):
for up in u.pkg.dsc["uploaders"].split(","):
up = up.strip()
uploader_ids.append(get_or_set_maintainer(up, session).maintainer_id)
added_ids = {}
for up in uploader_ids:
if added_ids.has_key(up):
utils.warn("Already saw uploader %s for source %s" % (up, source.source))
continue
added_ids[up] = 1
su = SrcUploader()
su.maintainer_id = up
su.source_id = source.source_id
session.add(su)
session.flush()
return dsc_component, dsc_location_id
__all__.append('add_dsc_to_db')
@session_wrapper
def add_deb_to_db(u, filename, session=None):
"""
Contrary to what you might expect, this routine deals with both
debs and udebs. That info is in 'dbtype', whilst 'type' is
'deb' for both of them
"""
cnf = Config()
entry = u.pkg.files[filename]
bin = DBBinary()
bin.package = entry["package"]
bin.version = entry["version"]
bin.maintainer_id = get_or_set_maintainer(entry["maintainer"], session).maintainer_id
bin.fingerprint_id = get_or_set_fingerprint(u.pkg.changes["fingerprint"], session).fingerprint_id
bin.arch_id = get_architecture(entry["architecture"], session).arch_id
bin.binarytype = entry["dbtype"]
# Find poolfile id
filename = entry["pool name"] + filename
fullpath = os.path.join(cnf["Dir::Pool"], filename)
if not entry.get("location id", None):
entry["location id"] = get_location(cnf["Dir::Pool"], entry["component"], utils.where_am_i(), session).location_id
if not entry.get("files id", None):
poolfile = add_poolfile(filename, entry, entry["location id"], session)
entry["files id"] = poolfile.file_id
bin.poolfile_id = entry["files id"]
# Find source id
bin_sources = get_sources_from_name(entry["source package"], entry["source version"], session=session)
if len(bin_sources) != 1:
raise NoSourceFieldError, "Unable to find a unique source id for %s (%s), %s, file %s, type %s, signed by %s" % \
(bin.package, bin.version, bin.architecture.arch_string,
filename, bin.binarytype, u.pkg.changes["fingerprint"])
bin.source_id = bin_sources[0].source_id
# Add and flush object so it has an ID
session.add(bin)
session.flush()
# Add BinAssociations
for suite_name in u.pkg.changes["distribution"].keys():
ba = BinAssociation()
ba.binary_id = bin.binary_id
ba.suite_id = get_suite(suite_name).suite_id
session.add(ba)
session.flush()
# Deal with contents - disabled for now
#contents = copy_temporary_contents(bin.package, bin.version, bin.architecture.arch_string, os.path.basename(filename), None, session)
#if not contents:
# print "REJECT\nCould not determine contents of package %s" % bin.package
# session.rollback()
# raise MissingContents, "No contents stored for package %s, and couldn't determine contents of %s" % (bin.package, filename)
__all__.append('add_deb_to_db')
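To illustrate the type/dbtype distinction the docstring describes, with a hypothetical files entry:

entry = u.pkg.files["example-udeb_1.0_all.udeb"]   # hypothetical entry
entry["type"]     # -> 'deb'  : debs and udebs both take the add_deb_to_db path
entry["dbtype"]   # -> 'udeb' : what is actually stored in bin.binarytype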
################################################################################
class SourceACL(object):
def __init__(self, *args, **kwargs):
pass
......
@@ -38,6 +38,8 @@ import commands
import shutil
import textwrap
from types import *
from sqlalchemy.sql.expression import desc
from sqlalchemy.orm.exc import NoResultFound
import yaml
@@ -46,6 +48,7 @@ from changes import *
from regexes import *
from config import Config
from holding import Holding
from urgencylog import UrgencyLog
from dbconn import *
from summarystats import SummaryStats
from utils import parse_changes, check_dsc_files
@@ -285,6 +288,7 @@ class Upload(object):
for title, messages in msgs:
if messages:
msg += '\n\n%s:\n%s' % (title, '\n'.join(messages))
msg += '\n'
return msg
@@ -796,17 +800,11 @@ class Upload(object):
entry["othercomponents"] = res.fetchone()[0]
def check_files(self, action=True):
archive = utils.where_am_i()
file_keys = self.pkg.files.keys()
holding = Holding()
cnf = Config()
# XXX: As far as I can tell, this can no longer happen - see
# comments by AJ in old revisions - mhy
# if reprocess is 2 we've already done this and we're checking
# things again for the new .orig.tar.gz.
# [Yes, I'm fully aware of how disgusting this is]
if action and self.reprocess < 2:
if action:
cwd = os.getcwd()
os.chdir(self.pkg.directory)
for f in file_keys:
@@ -817,36 +815,28 @@ class Upload(object):
os.chdir(cwd)
# Check there isn't already a .changes or .dak file of the same name in
# the proposed-updates "CopyChanges" or "CopyDotDak" storage directories.
# check we already know the changes file
# [NB: this check must be done post-suite mapping]
base_filename = os.path.basename(self.pkg.changes_file)
dot_dak_filename = base_filename[:-8] + ".dak"
for suite in self.pkg.changes["distribution"].keys():
copychanges = "Suite::%s::CopyChanges" % (suite)
if cnf.has_key(copychanges) and \
os.path.exists(os.path.join(cnf[copychanges], base_filename)):
self.rejects.append("%s: a file with this name already exists in %s" \
% (base_filename, cnf[copychanges]))
copy_dot_dak = "Suite::%s::CopyDotDak" % (suite)
if cnf.has_key(copy_dot_dak) and \
os.path.exists(os.path.join(cnf[copy_dot_dak], dot_dak_filename)):
self.rejects.append("%s: a file with this name already exists in %s" \
% (dot_dak_filename, cnf[copy_dot_dak]))
self.reprocess = 0
session = DBConn().session()
try:
changes = session.query(KnownChange).filter_by(changesname=base_filename).one()
if not changes.approved_for:
self.rejects.append("%s file already known to dak" % base_filename)
except NoResultFound, e:
# not known, good
pass
has_binaries = False
has_source = False
session = DBConn().session()
for f, entry in self.pkg.files.items():
# Ensure the file does not already exist in one of the accepted directories
for d in [ "Accepted", "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
for d in [ "Byhand", "New", "ProposedUpdates", "OldProposedUpdates", "Embargoed", "Unembargoed" ]:
if not cnf.has_key("Dir::Queue::%s" % (d)): continue
if os.path.exists(cnf["Dir::Queue::%s" % (d) ] + '/' + f):
if os.path.exists(os.path.join(cnf["Dir::Queue::%s" % (d) ], f)):
self.rejects.append("%s file already exists in the %s directory." % (f, d))
if not re_taint_free.match(f):
@@ -1084,15 +1074,10 @@ class Upload(object):
self.rejects.append("%s: changelog format not recognised (empty version tree)." % (dsc_filename))
def check_source(self):
# XXX: I'm fairly sure reprocess == 2 can never happen
# AJT disabled the is_incoming check years ago - mhy
# We should probably scrap or rethink the whole reprocess thing
# Bail out if:
# a) there's no source
# or b) reprocess is 2 - we will do this check next time when orig
# tarball is in 'files'
# or c) the orig files are MIA
if not self.pkg.changes["architecture"].has_key("source") or self.reprocess == 2 \
if not self.pkg.changes["architecture"].has_key("source") \
or len(self.pkg.orig_files) == 0:
return
@@ -1493,7 +1478,7 @@ class Upload(object):
# or binary, whereas keys with no access might be able to
# upload some binaries)
if fpr.source_acl.access_level == 'dm':
self.check_dm_source_upload(fpr, session)
self.check_dm_upload(fpr, session)
else:
# Check source-based permissions for other types
if self.pkg.changes["architecture"].has_key("source"):
@@ -1837,13 +1822,13 @@ distribution."""
return summary
###########################################################################
def accept (self, summary, short_summary, targetdir=None):
@session_wrapper
def accept (self, summary, short_summary, session=None):
"""
Accept an upload.
This moves all files referenced from the .changes into the I{accepted}
queue, sends the accepted mail, announces to lists, closes bugs and
This moves all files referenced from the .changes into the pool,
sends the accepted mail, announces to lists, closes bugs and
also checks for override disparities. If enabled it will write out
the version history for the BTS Version Tracking and will finally call
L{queue_build}.
@@ -1853,31 +1838,84 @@ distribution."""
@type short_summary: string
@param short_summary: Short summary
"""
cnf = Config()
stats = SummaryStats()
accepttemplate = os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted')
print "Installing."
self.logger.log(["installing changes", self.pkg.changes_file])
if targetdir is None:
targetdir = cnf["Dir::Queue::Accepted"]
# Add the .dsc file to the DB first
for newfile, entry in self.pkg.files.items():
if entry["type"] == "dsc":
dsc_component, dsc_location_id = add_dsc_to_db(self, newfile, session)
print "Accepting."
if self.logger:
self.logger.log(["Accepting changes", self.pkg.changes_file])
# Add .deb / .udeb files to the DB (type is always deb, dbtype is udeb/deb)
for newfile, entry in self.pkg.files.items():
if entry["type"] == "deb":
add_deb_to_db(self, newfile, session)
self.pkg.write_dot_dak(targetdir)
# If this is a sourceful diff only upload that is moving
# cross-component we need to copy the .orig files into the new
# component too for the same reasons as above.
if self.pkg.changes["architecture"].has_key("source"):
for orig_file in self.pkg.orig_files.keys():
if not self.pkg.orig_files[orig_file].has_key("id"):
continue # Skip if it's not in the pool
orig_file_id = self.pkg.orig_files[orig_file]["id"]
if self.pkg.orig_files[orig_file]["location"] == dsc_location_id:
continue # Skip if the location didn't change
# Do the move
oldf = get_poolfile_by_id(orig_file_id, session)
old_filename = os.path.join(oldf.location.path, oldf.filename)
old_dat = {'size': oldf.filesize, 'md5sum': oldf.md5sum,
'sha1sum': oldf.sha1sum, 'sha256sum': oldf.sha256sum}
new_filename = os.path.join(utils.poolify(self.pkg.changes["source"], dsc_component), os.path.basename(old_filename))
# TODO: Care about size/md5sum collisions etc
(found, newf) = check_poolfile(new_filename, old_dat['size'], old_dat['md5sum'], dsc_location_id, session)
if newf is None:
utils.copy(old_filename, os.path.join(cnf["Dir::Pool"], new_filename))
newf = add_poolfile(new_filename, old_dat, dsc_location_id, session)
# TODO: Check that there's only 1 here
source = get_sources_from_name(self.pkg.changes["source"], self.pkg.changes["version"])[0]
dscf = get_dscfiles(source_id=source.source_id, poolfile_id=orig_file_id, session=session)[0]
dscf.poolfile_id = newf.file_id
session.add(dscf)
session.flush()
# Install the files into the pool
for newfile, entry in self.pkg.files.items():
destination = os.path.join(cnf["Dir::Pool"], entry["pool name"], newfile)
utils.move(newfile, destination)
self.logger.log(["installed", newfile, entry["type"], entry["size"], entry["architecture"]])
stats.accept_bytes += float(entry["size"])
# Move all the files into the accepted directory
utils.move(self.pkg.changes_file, targetdir)
# Copy the .changes file across for suite which need it.
copy_changes = {}
for suite_name in self.pkg.changes["distribution"].keys():
if cnf.has_key("Suite::%s::CopyChanges" % (suite_name)):
copy_changes[cnf["Suite::%s::CopyChanges" % (suite_name)]] = ""
for name, entry in sorted(self.pkg.files.items()):
utils.move(name, targetdir)
stats.accept_bytes += float(entry["size"])
for dest in copy_changes.keys():
utils.copy(self.pkg.changes_file, os.path.join(cnf["Dir::Root"], dest))
stats.accept_count += 1
# We're done - commit the database changes
session.commit()
# Our SQL session will automatically start a new transaction after
# the last commit
# Move the .changes into the 'done' directory
utils.move(self.pkg.changes_file,
os.path.join(cnf["Dir::Queue::Done"], os.path.basename(self.pkg.changes_file)))
if self.pkg.changes["architecture"].has_key("source") and cnf.get("Dir::UrgencyLog"):
UrgencyLog().log(self.pkg.dsc["source"], self.pkg.dsc["version"], self.pkg.changes["urgency"])
# Send accept mail, announce to lists, close bugs and check for
# override disparities
@@ -1885,7 +1923,8 @@ distribution."""
self.update_subst()
self.Subst["__SUITE__"] = ""
self.Subst["__SUMMARY__"] = summary
mail_message = utils.TemplateSubst(self.Subst, accepttemplate)
mail_message = utils.TemplateSubst(self.Subst,
os.path.join(cnf["Dir::Templates"], 'process-unchecked.accepted'))
utils.send_mail(mail_message)
self.announce(short_summary, 1)
@@ -1930,6 +1969,10 @@ distribution."""
#if res:
# utils.fubar(res)
session.commit()
# Finally...
stats.accept_count += 1
def check_override(self):
"""
@@ -1968,15 +2011,21 @@ distribution."""
def remove(self, from_dir=None):
"""
Used (for instance) in p-u to remove the package from unchecked
Also removes the package from holding area.
"""
if from_dir is None:
os.chdir(self.pkg.directory)
else:
os.chdir(from_dir)
from_dir = self.pkg.directory
h = Holding()
for f in self.pkg.files.keys():
os.unlink(f)
os.unlink(self.pkg.changes_file)
os.unlink(os.path.join(from_dir, f))
if os.path.exists(os.path.join(h.holding_dir, f)):
os.unlink(os.path.join(h.holding_dir, f))
os.unlink(os.path.join(from_dir, self.pkg.changes_file))
if os.path.exists(os.path.join(h.holding_dir, self.pkg.changes_file)):
os.unlink(os.path.join(h.holding_dir, self.pkg.changes_file))
###########################################################################
@@ -1984,9 +2033,11 @@ distribution."""
"""
Move files to dest with certain perms/changesperms
"""
utils.move(self.pkg.changes_file, dest, perms=changesperms)
h = Holding()
utils.move(os.path.join(h.holding_dir, self.pkg.changes_file),
dest, perms=changesperms)
for f in self.pkg.files.keys():
utils.move(f, dest, perms=perms)
utils.move(os.path.join(h.holding_dir, f), dest, perms=perms)
###########################################################################
@@ -2377,6 +2428,7 @@ distribution."""
# This would fix the stupidity of changing something we often iterate over
# whilst we're doing it
del self.pkg.files[dsc_name]
dsc_entry["files id"] = i.file_id
if not orig_files.has_key(dsc_name):
orig_files[dsc_name] = {}
orig_files[dsc_name]["path"] = os.path.join(i.location.path, i.filename)
......
#!/usr/bin/env python
# vim:set et sw=4:
"""
Utility functions for process-upload
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2000, 2001, 2002, 2003, 2004, 2005, 2006 James Troup <james@nocrew.org>
@copyright: 2009 Joerg Jaspert <joerg@debian.org>
@copyright: 2009 Mark Hymers <mhy@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
from daklib import utils
from daklib.dbconn import *
from daklib.config import Config
###############################################################################
def determine_target(u):
cnf = Config()
queues = [ "New", "Autobyhand", "Byhand" ]
if cnf.FindB("Dinstall::SecurityQueueHandling"):
queues += [ "Unembargo", "Embargo" ]
else:
queues += [ "OldStableUpdate", "StableUpdate" ]
target = None
for q in queues:
if QueueInfo[q]["is"](u):
target = q
break
return target
################################################################################
def package_to_suite(u, suite):
if not u.pkg.changes["distribution"].has_key(suite):
return False
ret = True
if not u.pkg.changes["architecture"].has_key("source"):
s = DBConn().session()
q = s.query(SrcAssociation.sa_id)
q = q.join(Suite).filter_by(suite_name=suite)
q = q.join(DBSource).filter_by(source=u.pkg.changes['source'])
q = q.filter_by(version=u.pkg.changes['version']).limit(1)
# NB: Careful, this logic isn't what you would think it is
# Source is already in {old-,}proposed-updates so no need to hold
# Instead, we don't move to the holding area, we just do an ACCEPT
if q.count() > 0:
ret = False
s.close()
return ret
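A worked trace of that inverted logic, using a hypothetical binary-only upload:

# foo_1.2-3_amd64.changes targets proposed-updates with no 'source' architecture,
# and source package foo 1.2-3 is already associated with that suite:
package_to_suite(u, 'proposed-updates')   # -> False: don't hold, just ACCEPT
# If the source were not in the suite yet, this would return True and the
# upload would be moved to the ProposedUpdates holding queue instead.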
def package_to_queue(u, summary, short_summary, queue, perms=0660, build=True, announce=None):
cnf = Config()
dir = cnf["Dir::Queue::%s" % queue]
print "Moving to %s holding area" % queue.upper()
u.logger.log(["Moving to %s" % queue, u.pkg.changes_file])
u.move_to_dir(dir, perms=perms)
if build:
get_or_set_queue(queue.lower()).autobuild_upload(u.pkg, dir)
# Check for override disparities
u.check_override()
# Send accept mail, announce to lists and close bugs
if announce and not cnf["Dinstall::Options::No-Mail"]:
template = os.path.join(cnf["Dir::Templates"], announce)
u.update_subst()
u.Subst["__SUITE__"] = ""
mail_message = utils.TemplateSubst(u.Subst, template)
utils.send_mail(mail_message)
u.announce(short_summary, True)
################################################################################
def is_unembargo(u):
session = DBConn().session()
cnf = Config()
q = session.execute("SELECT package FROM disembargo WHERE package = :source AND version = :version", u.pkg.changes)
if q.rowcount > 0:
session.close()
return True
oldcwd = os.getcwd()
os.chdir(cnf["Dir::Queue::Disembargo"])
disdir = os.getcwd()
os.chdir(oldcwd)
ret = False
if u.pkg.directory == disdir:
if u.pkg.changes["architecture"].has_key("source"):
session.execute("INSERT INTO disembargo (package, version) VALUES (:package, :version)", u.pkg.changes)
session.commit()
ret = True
session.close()
return ret
def queue_unembargo(u, summary, short_summary, session=None):
return package_to_queue(u, summary, short_summary, "Unembargoed",
perms=0660, build=True, announce='process-unchecked.accepted')
################################################################################
def is_embargo(u):
# if embargoed queues are enabled always embargo
return True
def queue_embargo(u, summary, short_summary, session=None):
return package_to_queue(u, summary, short_summary, "Unembargoed",
perms=0660, build=True, announce='process-unchecked.accepted')
################################################################################
def is_stableupdate(u):
return package_to_suite(u, 'proposed-updates')
def do_stableupdate(u, summary, short_summary, session=None):
return package_to_queue(u, summary, short_summary, "ProposedUpdates",
perms=0664, build=False, announce=None)
################################################################################
def is_oldstableupdate(u):
return package_to_suite(u, 'oldstable-proposed-updates')
def do_oldstableupdate(u, summary, short_summary, session=None):
return package_to_queue(u, summary, short_summary, "OldProposedUpdates",
perms=0664, build=False, announce=None)
################################################################################
def is_autobyhand(u):
cnf = Config()
all_auto = 1
any_auto = 0
for f in u.pkg.files.keys():
if u.pkg.files[f].has_key("byhand"):
any_auto = 1
# filename is of form "PKG_VER_ARCH.EXT" where PKG, VER and ARCH
# don't contain underscores, and ARCH doesn't contain dots.
# further VER matches the .changes Version:, and ARCH should be in
# the .changes Architecture: list.
if f.count("_") < 2:
all_auto = 0
continue
(pckg, ver, archext) = f.split("_", 2)
if archext.count(".") < 1 or u.pkg.changes["version"] != ver:
all_auto = 0
continue
ABH = cnf.SubTree("AutomaticByHandPackages")
if not ABH.has_key(pckg) or \
ABH["%s::Source" % (pckg)] != u.pkg.changes["source"]:
print "not match %s %s" % (pckg, u.pkg.changes["source"])
all_auto = 0
continue
(arch, ext) = archext.split(".", 1)
if arch not in u.pkg.changes["architecture"]:
all_auto = 0
continue
u.pkg.files[f]["byhand-arch"] = arch
u.pkg.files[f]["byhand-script"] = ABH["%s::Script" % (pckg)]
return any_auto and all_auto
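How the PKG_VER_ARCH.EXT split plays out on a made-up byhand filename:

f = "debian-installer-images_20091215_amd64.tar.gz"   # hypothetical example
(pckg, ver, archext) = f.split("_", 2)  # 'debian-installer-images', '20091215', 'amd64.tar.gz'
(arch, ext) = archext.split(".", 1)     # 'amd64', 'tar.gz'
# ver must match the .changes Version: field and arch must appear in its
# Architecture: list for the file to qualify for AUTOBYHAND.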
def do_autobyhand(u, summary, short_summary, session=None):
print "Attempting AUTOBYHAND."
byhandleft = True
for f, entry in u.pkg.files.items():
byhandfile = f
if not entry.has_key("byhand"):
continue
if not entry.has_key("byhand-script"):
byhandleft = True
continue
os.system("ls -l %s" % byhandfile)
result = os.system("%s %s %s %s %s" % (
entry["byhand-script"],
byhandfile,
u.pkg.changes["version"],
entry["byhand-arch"],
os.path.abspath(u.pkg.changes_file)))
if result == 0:
os.unlink(byhandfile)
del entry
else:
print "Error processing %s, left as byhand." % (f)
byhandleft = True
if byhandleft:
do_byhand(u, summary, short_summary, session)
else:
u.accept(summary, short_summary, session)
u.check_override()
################################################################################
def is_byhand(u):
for f in u.pkg.files.keys():
if u.pkg.files[f].has_key("byhand"):
return True
return False
def do_byhand(u, summary, short_summary, session=None):
return package_to_queue(u, summary, short_summary, "Byhand",
perms=0660, build=False, announce=None)
################################################################################
def is_new(u):
for f in u.pkg.files.keys():
if u.pkg.files[f].has_key("new"):
return True
return False
def acknowledge_new(u, summary, short_summary, session=None):
cnf = Config()
print "Moving to NEW queue."
u.logger.log(["Moving to new", u.pkg.changes_file])
u.move_to_dir(cnf["Dir::Queue::New"], perms=0640, changesperms=0644)
if not cnf["Dinstall::Options::No-Mail"]:
print "Sending new ack."
template = os.path.join(cnf["Dir::Templates"], 'process-unchecked.new')
u.update_subst()
u.Subst["__SUMMARY__"] = summary
new_ack_message = utils.TemplateSubst(u.Subst, template)
utils.send_mail(new_ack_message)
################################################################################
# q-unapproved hax0ring
QueueInfo = {
"New": { "is": is_new, "process": acknowledge_new },
"Autobyhand" : { "is" : is_autobyhand, "process": do_autobyhand },
"Byhand" : { "is": is_byhand, "process": do_byhand },
"OldStableUpdate" : { "is": is_oldstableupdate,
"process": do_oldstableupdate },
"StableUpdate" : { "is": is_stableupdate, "process": do_stableupdate },
"Unembargo" : { "is": is_unembargo, "process": queue_unembargo },
"Embargo" : { "is": is_embargo, "process": queue_embargo },
}
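How a driver such as dak/process-upload.py presumably consumes this table (a sketch; the actual caller is not part of this diff):

def dispatch(u, summary, short_summary):
    q = determine_target(u)
    if q:
        # Hand the upload to the matching special-queue handler
        QueueInfo[q]["process"](u, summary, short_summary)
    else:
        # Nothing special applies: install straight into the pool
        u.accept(summary, short_summary)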