Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
喜羊羊3508
Dak
提交
6d0c1b14
D
Dak
项目概览
喜羊羊3508
/
Dak
10 个月 前同步成功
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
Dak
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
前往新版Gitcode,体验更适合开发者的 AI 搜索 >>
提交
6d0c1b14
编写于
10月 30, 2009
作者:
M
Mark Hymers
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add changes and queue handling
Signed-off-by:
N
Mark Hymers
<
mhy@debian.org
>
上级
4bb08a11
变更
5
隐藏空白更改
内联
并排
Showing
5 changed files
with
214 additions
and
178 deletions
+214
-178
dak/clean_suites.py
dak/clean_suites.py
+1
-0
dak/dakdb/update21.py
dak/dakdb/update21.py
+129
-0
dak/update_db.py
dak/update_db.py
+1
-1
daklib/dbconn.py
daklib/dbconn.py
+78
-161
daklib/queue.py
daklib/queue.py
+5
-16
未找到文件。
dak/clean_suites.py
浏览文件 @
6d0c1b14
...
...
@@ -163,6 +163,7 @@ def check_files(now_date, delete_date, max_delete, session):
SELECT id, filename FROM files f
WHERE NOT EXISTS (SELECT 1 FROM binaries b WHERE b.file = f.id)
AND NOT EXISTS (SELECT 1 FROM dsc_files df WHERE df.file = f.id)
AND NOT EXISTS (SELECT 1 FROM changes_pool_files cpf WHERE cpf.fileid = f.id)
AND last_used IS NULL
ORDER BY filename"""
)
...
...
dak/dakdb/update21.py
0 → 100755
浏览文件 @
6d0c1b14
#!/usr/bin/env python
# coding=utf8
"""
Modify queue autobuild support
@contact: Debian FTP Master <ftpmaster@debian.org>
@copyright: 2009 Mark Hymers <mhy@debian.org>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
################################################################################
import
psycopg2
import
time
import
os
import
datetime
import
traceback
from
daklib.dak_exceptions
import
DBUpdateError
from
daklib.config
import
Config
################################################################################
def do_update(self):
    """
    Schema update 21: rework queue and changes handling.

    Adds queue.copy_pool_files, replaces the queue_build table with
    queue_files (migrating existing rows across), creates the
    changes_pending_files, changes_pool_files and suite_queue_copy tables,
    seeds suite_queue_copy from the configuration, and bumps db_revision
    to 21.

    Raises DBUpdateError (after rolling back) if the update fails.
    """
    print("Updating queue_build table")

    try:
        c = self.db.cursor()
        cnf = Config()

        print("Adding copy_files field to queue table")
        c.execute("ALTER TABLE queue ADD copy_pool_files BOOL NOT NULL DEFAULT FALSE")

        print("Adding queue_files table")
        c.execute("""CREATE TABLE queue_files (
    id         SERIAL PRIMARY KEY,
    queueid    INT4 NOT NULL REFERENCES queue(id) ON DELETE RESTRICT,
    insertdate TIMESTAMP NOT NULL DEFAULT now(),
    lastused   TIMESTAMP DEFAULT NULL,
    filename   TEXT NOT NULL,
    fileid     INT4 REFERENCES files(id) ON DELETE CASCADE)""")

        # Migrate the existing queue_build entries across.
        c.execute("""SELECT queue_build.filename, queue_build.last_used, queue_build.queue
                       FROM queue_build""")

        for r in c.fetchall():
            filename = r[0]
            # NOTE(review): last_used is fetched but not migrated -- the new
            # rows are stamped now() below; confirm that is intended.
            last_used = r[1]
            queue = r[2]
            print(filename)
            try:
                endlink = os.readlink(filename)
                # Look the symlink target up by basename.  Parameterized:
                # the old %-interpolated LIKE pattern broke on filenames
                # containing quote characters.
                c.execute("SELECT files.id FROM files WHERE filename LIKE %s",
                          ('%' + endlink[endlink.rindex('/') + 1:],))
                f = c.fetchone()
                # fetchone() returns None when nothing matched; the old code
                # crashed on f[0] in that case.
                if f is None:
                    print("Can't find %s in the files table" % filename)
                    continue
                c.execute("""INSERT INTO queue_files (queueid, lastused, filename, fileid) VALUES
                             (%s, now(), %s, %s)""",
                          (queue, filename[filename.rindex('/') + 1:], f[0]))
            except OSError as e:
                print("Can't find file %s (%s)" % (filename, e))

        print("Dropping old queue_build table")
        c.execute("DROP TABLE queue_build")

        print("Adding changes_pending_files table")
        c.execute("""CREATE TABLE changes_pending_files (
    id        SERIAL PRIMARY KEY,
    changeid  INT4 NOT NULL REFERENCES known_changes(id) ON DELETE CASCADE,
    filename  TEXT NOT NULL,
    source    BOOL NOT NULL DEFAULT FALSE,
    filesize  BIGINT NOT NULL,
    md5sum    TEXT NOT NULL,
    sha1sum   TEXT NOT NULL,
    sha256sum TEXT NOT NULL)""")

        print("Adding changes_pool_files table")
        c.execute("""CREATE TABLE changes_pool_files (
    changeid INT4 NOT NULL REFERENCES known_changes(id) ON DELETE CASCADE,
    fileid   INT4 NOT NULL REFERENCES files(id) ON DELETE RESTRICT,
    PRIMARY KEY (changeid, fileid))""")

        print("Adding suite_queue_copy table")
        c.execute("""CREATE TABLE suite_queue_copy (
    suite INT4 NOT NULL REFERENCES suite(id),
    queue INT4 NOT NULL REFERENCES queue(id),
    PRIMARY KEY (suite, queue))""")

        # Link all suites from accepted
        c.execute("""SELECT suite.id FROM suite""")
        for s in c.fetchall():
            c.execute("""INSERT INTO suite_queue_copy (suite, queue) VALUES (%s, (SELECT id FROM queue WHERE queue_name = 'accepted'))""", s)

        # Parse the config and add any buildd stuff.  Parameterized: the
        # old %-interpolation would break on a path containing a quote.
        # (The redundant second `cnf = Config()` the original had here was
        # dropped -- cnf is already bound above.)
        c.execute("""INSERT INTO queue (queue_name, path) VALUES ('buildd', %s)""",
                  (cnf["Dir::QueueBuild"].rstrip('/'),))

        for s in cnf.ValueList("Dinstall::QueueBuildSuites"):
            c.execute("""INSERT INTO suite_queue_copy (suite, queue)
                          VALUES ( (SELECT id FROM suite WHERE suite_name = %s),
                                   (SELECT id FROM queue WHERE queue_name = 'buildd'))""",
                      (s.lower(),))

        print("Committing")
        c.execute("UPDATE config SET value = '21' WHERE name = 'db_revision'")
        self.db.commit()

    except psycopg2.InternalError as msg:
        self.db.rollback()
        raise DBUpdateError("Unable to apply queue_build 21, rollback issued. Error message : %s" % (str(msg)))
dak/update_db.py
浏览文件 @
6d0c1b14
...
...
@@ -44,7 +44,7 @@ from daklib.dak_exceptions import DBUpdateError
################################################################################
Cnf = None

# Database schema revision this version of the code requires; bumped to 21
# by dak/dakdb/update21.py (queue/changes handling rework).
required_database_schema = 21
################################################################################
...
...
daklib/dbconn.py
浏览文件 @
6d0c1b14
...
...
@@ -37,6 +37,7 @@ import os
import
re
import
psycopg2
import
traceback
import
datetime
from
inspect
import
getargspec
...
...
@@ -728,6 +729,10 @@ class PoolFile(object):
    def __repr__(self):
        # Terse debugging representation: the pool-relative filename only.
        return '<PoolFile %s>' % self.filename
@
property
def
fullpath
(
self
):
return
os
.
path
.
join
(
self
.
location
.
path
,
self
.
filename
)
__all__
.
append
(
'PoolFile'
)
@
session_wrapper
...
...
@@ -1119,6 +1124,18 @@ def get_knownchange(filename, session=None):
__all__
.
append
(
'get_knownchange'
)
################################################################################
class KnownChangePendingFile(object):
    """ORM class for a changes_pending_files row.

    Attributes (filename, checksums, known_change_pending_file_id, ...) are
    supplied by the SQLAlchemy mapper, not by this class.
    """

    def __init__(self, *args, **kwargs):
        # Instantiation is handled entirely by SQLAlchemy.
        pass

    def __repr__(self):
        # Identify the row by its primary key for debugging.
        return '<KnownChangePendingFile %s>' % self.known_change_pending_file_id
__all__
.
append
(
'KnownChangePendingFile'
)
################################################################################
class
Location
(
object
):
def
__init__
(
self
,
*
args
,
**
kwargs
):
pass
...
...
@@ -1562,127 +1579,55 @@ class Queue(object):
    def __repr__(self):
        # Debugging representation: identify the queue by its name.
        return '<Queue %s>' % self.queue_name
def
a
utobuild_upload
(
self
,
changes
,
srcpath
,
session
=
Non
e
):
"""
Update queue_build database table used for incoming autobuild support
.
def
a
dd_file_from_pool
(
self
,
poolfil
e
):
"""
Copies a file into the pool. Assumes that the PoolFile object is
attached to the same SQLAlchemy session as the Queue object is
.
@type changes: Changes
@param changes: changes object for the upload to process
The caller is responsible for committing after calling this function."""
poolfile_basename
=
poolfile
.
filename
[
poolfile
.
filename
.
rindex
(
os
.
sep
)
+
1
:]
@type srcpath: string
@param srcpath: path for the queue file entries/link destinations
# Check if we have a file of this name or this ID already
for
f
in
self
.
queuefiles
:
if
f
.
fileid
is
not
None
and
f
.
fileid
==
poolfile
.
file_id
or
\
f
.
poolfile
.
filename
==
poolfile_basename
:
# In this case, update the QueueFile entry so we
# don't remove it too early
f
.
lastused
=
datetime
.
now
()
DBConn
().
session
().
object_session
(
pf
).
add
(
f
)
return
f
@type session: SQLAlchemy session
@param session: Optional SQLAlchemy session. If this is passed, the
caller is responsible for ensuring a transaction has begun and
committing the results or rolling back based on the result code. If
not passed, a commit will be performed at the end of the function,
otherwise the caller is responsible for commiting.
# Prepare QueueFile object
qf
=
QueueFile
()
qf
.
queue_id
=
self
.
queue_id
qf
.
lastused
=
datetime
.
now
()
qf
.
filename
=
dest
@rtype: NoneType or string
@return: None if the operation failed, a string describing the error if not
"""
targetpath
=
qf
.
fullpath
queuepath
=
os
.
path
.
join
(
self
.
path
,
poolfile_basename
)
privatetrans
=
False
if
session
is
None
:
session
=
DBConn
().
session
()
privatetrans
=
True
# TODO: Remove by moving queue config into the database
conf
=
Config
()
for
suitename
in
changes
.
changes
[
"distribution"
].
keys
():
# TODO: Move into database as:
# buildqueuedir TEXT DEFAULT NULL (i.e. NULL is no build)
# buildqueuecopy BOOLEAN NOT NULL DEFAULT FALSE (i.e. default is symlink)
# This also gets rid of the SecurityQueueBuild hack below
if
suitename
not
in
conf
.
ValueList
(
"Dinstall::QueueBuildSuites"
):
continue
# Find suite object
s
=
get_suite
(
suitename
,
session
)
if
s
is
None
:
return
"INTERNAL ERROR: Could not find suite %s"
%
suitename
# TODO: Get from database as above
dest_dir
=
conf
[
"Dir::QueueBuild"
]
# TODO: Move into database as above
if
conf
.
FindB
(
"Dinstall::SecurityQueueBuild"
):
dest_dir
=
os
.
path
.
join
(
dest_dir
,
suitename
)
for
file_entry
in
changes
.
files
.
keys
():
src
=
os
.
path
.
join
(
srcpath
,
file_entry
)
dest
=
os
.
path
.
join
(
dest_dir
,
file_entry
)
# TODO: Move into database as above
if
conf
.
FindB
(
"Dinstall::SecurityQueueBuild"
):
# Copy it since the original won't be readable by www-data
import
utils
utils
.
copy
(
src
,
dest
)
else
:
# Create a symlink to it
os
.
symlink
(
src
,
dest
)
qb
=
QueueBuild
()
qb
.
suite_id
=
s
.
suite_id
qb
.
queue_id
=
self
.
queue_id
qb
.
filename
=
dest
qb
.
in_queue
=
True
session
.
add
(
qb
)
# If the .orig tarballs are in the pool, create a symlink to
# them (if one doesn't already exist)
for
dsc_file
in
changes
.
dsc_files
.
keys
():
# Skip all files except orig tarballs
from
daklib.regexes
import
re_is_orig_source
if
not
re_is_orig_source
.
match
(
dsc_file
):
continue
# Skip orig files not identified in the pool
if
not
(
changes
.
orig_files
.
has_key
(
dsc_file
)
and
changes
.
orig_files
[
dsc_file
].
has_key
(
"id"
)):
continue
orig_file_id
=
changes
.
orig_files
[
dsc_file
][
"id"
]
dest
=
os
.
path
.
join
(
dest_dir
,
dsc_file
)
# If it doesn't exist, create a symlink
if
not
os
.
path
.
exists
(
dest
):
q
=
session
.
execute
(
"SELECT l.path, f.filename FROM location l, files f WHERE f.id = :id and f.location = l.id"
,
{
'id'
:
orig_file_id
})
res
=
q
.
fetchone
()
if
not
res
:
return
"[INTERNAL ERROR] Couldn't find id %s in files table."
%
(
orig_file_id
)
src
=
os
.
path
.
join
(
res
[
0
],
res
[
1
])
os
.
symlink
(
src
,
dest
)
# Add it to the list of packages for later processing by apt-ftparchive
qb
=
QueueBuild
()
qb
.
suite_id
=
s
.
suite_id
qb
.
queue_id
=
self
.
queue_id
qb
.
filename
=
dest
qb
.
in_queue
=
True
session
.
add
(
qb
)
# If it does, update things to ensure it's not removed prematurely
else
:
qb
=
get_queue_build
(
dest
,
s
.
suite_id
,
session
)
if
qb
is
None
:
qb
.
in_queue
=
True
qb
.
last_used
=
None
session
.
add
(
qb
)
try
:
if
self
.
copy_pool_files
:
# We need to copy instead of symlink
import
utils
utils
.
copy
(
targetfile
,
queuepath
)
# NULL in the fileid field implies a copy
qf
.
fileid
=
None
else
:
os
.
symlink
(
targetfile
,
queuepath
)
qf
.
fileid
=
poolfile
.
file_id
except
OSError
:
return
None
if
privatetrans
:
session
.
commit
()
session
.
close
()
# Get the same session as the PoolFile is using and add the qf to it
DBConn
().
session
().
object_session
(
poolfile
).
add
(
qf
)
return
qf
return
None
__all__
.
append
(
'Queue'
)
@session_wrapper
def get_queue(queuename, session=None):
    """
    Returns Queue object for given C{queue name}.

    DOC FIX: the old docstring claimed the queue is created if it does not
    exist; the creation path was removed, so a missing queue yields None.

    @type queuename: string
    @param queuename: The name of the queue

    @type session: Session
    @param session: Optional SQLA session object (a temporary one will be
    generated if not supplied)

    @rtype: Queue or NoneType
    @return: Queue object for the given queue name, or None if no such
        queue exists
    """
    q = session.query(Queue).filter_by(queue_name=queuename)

    try:
        return q.one()
    except NoResultFound:
        return None

__all__.append('get_queue')
################################################################################
class QueueFile(object):
    """ORM class for a queue_files row: one file sitting in (or linked
    into) a queue directory.

    Attributes (filename, queue_id, fileid, lastused, ...) are supplied by
    the SQLAlchemy mapper, not by this class.
    """

    def __init__(self, *args, **kwargs):
        # Instantiation is handled entirely by SQLAlchemy.
        pass

    def __repr__(self):
        # Show the filename together with the owning queue's id.
        return '<QueueFile %s (%s)>' % (self.filename, self.queue_id)
__all__
.
append
(
'get_queue_build'
)
__all__
.
append
(
'QueueFile'
)
################################################################################
...
...
@@ -2354,6 +2261,8 @@ class DBConn(Singleton):
self
.
tbl_content_associations
=
Table
(
'content_associations'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_content_file_names
=
Table
(
'content_file_names'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_content_file_paths
=
Table
(
'content_file_paths'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_changes_pending_files
=
Table
(
'changes_pending_files'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_changes_pool_files
=
Table
(
'changes_pool_files'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_dsc_files
=
Table
(
'dsc_files'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_files
=
Table
(
'files'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_fingerprint
=
Table
(
'fingerprint'
,
self
.
db_meta
,
autoload
=
True
)
...
...
@@ -2368,7 +2277,7 @@ class DBConn(Singleton):
self
.
tbl_pending_content_associations
=
Table
(
'pending_content_associations'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_priority
=
Table
(
'priority'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_queue
=
Table
(
'queue'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_queue_
build
=
Table
(
'queue_build
'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_queue_
files
=
Table
(
'queue_files
'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_section
=
Table
(
'section'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_source
=
Table
(
'source'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_source_acl
=
Table
(
'source_acl'
,
self
.
db_meta
,
autoload
=
True
)
...
...
@@ -2378,6 +2287,7 @@ class DBConn(Singleton):
self
.
tbl_suite
=
Table
(
'suite'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_suite_architectures
=
Table
(
'suite_architectures'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_suite_src_formats
=
Table
(
'suite_src_formats'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_suite_queue_copy
=
Table
(
'suite_queue_copy'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_uid
=
Table
(
'uid'
,
self
.
db_meta
,
autoload
=
True
)
self
.
tbl_upload_blocks
=
Table
(
'upload_blocks'
,
self
.
db_meta
,
autoload
=
True
)
...
...
@@ -2458,7 +2368,14 @@ class DBConn(Singleton):
keyring_id
=
self
.
tbl_keyrings
.
c
.
id
))
mapper
(
KnownChange
,
self
.
tbl_known_changes
,
properties
=
dict
(
known_change_id
=
self
.
tbl_known_changes
.
c
.
id
))
properties
=
dict
(
known_change_id
=
self
.
tbl_known_changes
.
c
.
id
,
poolfiles
=
relation
(
PoolFile
,
secondary
=
self
.
tbl_changes_pool_files
,
backref
=
"changeslinks"
),
files
=
relation
(
KnownChangePendingFile
,
backref
=
"changesfile"
)))
mapper
(
KnownChangePendingFile
,
self
.
tbl_changes_pending_files
,
properties
=
dict
(
known_change_pending_file_id
=
self
.
tbl_changes_pending_files
.
id
))
mapper
(
KeyringACLMap
,
self
.
tbl_keyring_acl_map
,
properties
=
dict
(
keyring_acl_map_id
=
self
.
tbl_keyring_acl_map
.
c
.
id
,
...
...
@@ -2501,10 +2418,9 @@ class DBConn(Singleton):
mapper
(
Queue
,
self
.
tbl_queue
,
properties
=
dict
(
queue_id
=
self
.
tbl_queue
.
c
.
id
))
mapper
(
QueueBuild
,
self
.
tbl_queue_build
,
properties
=
dict
(
suite_id
=
self
.
tbl_queue_build
.
c
.
suite
,
queue_id
=
self
.
tbl_queue_build
.
c
.
queue
,
queue
=
relation
(
Queue
,
backref
=
'queuebuild'
)))
mapper
(
QueueFile
,
self
.
tbl_queue_files
,
properties
=
dict
(
queue
=
relation
(
Queue
,
backref
=
'queuefiles'
),
poolfile
=
relation
(
PoolFile
,
backref
=
'queueinstances'
)))
mapper
(
Section
,
self
.
tbl_section
,
properties
=
dict
(
section_id
=
self
.
tbl_section
.
c
.
id
))
...
...
@@ -2553,7 +2469,8 @@ class DBConn(Singleton):
mapper
(
Suite
,
self
.
tbl_suite
,
properties
=
dict
(
suite_id
=
self
.
tbl_suite
.
c
.
id
,
policy_queue
=
relation
(
Queue
)))
policy_queue
=
relation
(
Queue
),
copy_queues
=
relation
(
Queue
,
secondary
=
self
.
tbl_suite_queue_copy
)))
mapper
(
SuiteArchitecture
,
self
.
tbl_suite_architectures
,
properties
=
dict
(
suite_id
=
self
.
tbl_suite_architectures
.
c
.
suite
,
...
...
daklib/queue.py
浏览文件 @
6d0c1b14
...
...
@@ -1923,23 +1923,12 @@ distribution."""
os
.
rename
(
temp_filename
,
filename
)
os
.
chmod
(
filename
,
0644
)
# Its is Cnf["Dir::Queue::Accepted"] here, not targetdir!
# <Ganneff> we do call queue_build too
# <mhy> well yes, we'd have had to if we were inserting into accepted
# <Ganneff> now. thats database only.
# <mhy> urgh, that's going to get messy
# <Ganneff> so i make the p-n call to it *also* using accepted/
# <mhy> but then the packages will be in the queue_build table without the files being there
# <Ganneff> as the buildd queue is only regenerated whenever unchecked runs
# <mhy> ah, good point
# <Ganneff> so it will work out, as unchecked move it over
# <mhy> that's all completely sick
# <Ganneff> yes
# This routine returns None on success or an error on failure
res
=
get_or_set_queue
(
'accepted'
).
autobuild_upload
(
self
.
pkg
,
cnf
[
"Dir::Queue::Accepted"
])
if
res
:
utils
.
fubar
(
res
)
# TODO: Replace queue copying using the new queue.add_file_from_pool routine
# and by looking up which queues in suite.copy_queues
#res = get_queue('accepted').autobuild_upload(self.pkg, cnf["Dir::Queue::Accepted"])
#if res:
# utils.fubar(res)
def
check_override
(
self
):
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录