提交 8a9016a0 编写于 作者: Cleber Rosa

Merge remote-tracking branch 'lmr/migrate-virt-test-from-autotest-v4'

......@@ -65,7 +65,9 @@ clean:
test -L etc/avocado/conf.d/virt.conf && rm -f etc/avocado/conf.d/virt.conf || true
test -L avocado/core/plugins/virt_test.py && rm -f avocado/core/plugins/virt_test.py || true
test -L avocado/core/plugins/virt_test_list.py && rm -f avocado/core/plugins/virt_test_list.py || true
test -L avocado/core/plugins/virt_test_bootstrap.py && rm -f avocado/core/plugins/virt_test_bootstrap.py || true
test -L etc/avocado/conf.d/virt-test.conf && rm -f etc/avocado/conf.d/virt-test.conf || true
test -L virttest && rm -f virttest || true
check: clean check_cyclical modules_boundaries
selftests/checkall
......@@ -79,15 +81,17 @@ modules_boundaries:
link: link_virt link_vt
link_virt:
test -d ../avocado-virt/avocado/virt && ln -s ../../avocado-virt/avocado/virt avocado || true
test -f ../avocado-virt/etc/avocado/conf.d/virt.conf && ln -s ../../../../avocado-virt/etc/avocado/conf.d/virt.conf etc/avocado/conf.d/ || true
test -f ../avocado-virt/avocado/core/plugins/virt.py && ln -s ../../../../avocado-virt/avocado/core/plugins/virt.py avocado/core/plugins/ || true
test -f ../avocado-virt/avocado/core/plugins/virt_bootstrap.py && ln -s ../../../../avocado-virt/avocado/core/plugins/virt_bootstrap.py avocado/core/plugins/ || true
test -d ../avocado-virt/avocado/virt && ln -sf ../../avocado-virt/avocado/virt avocado || true
test -f ../avocado-virt/etc/avocado/conf.d/virt.conf && ln -sf ../../../../avocado-virt/etc/avocado/conf.d/virt.conf etc/avocado/conf.d/ || true
test -f ../avocado-virt/avocado/core/plugins/virt.py && ln -sf ../../../../avocado-virt/avocado/core/plugins/virt.py avocado/core/plugins/ || true
test -f ../avocado-virt/avocado/core/plugins/virt_bootstrap.py && ln -sf ../../../../avocado-virt/avocado/core/plugins/virt_bootstrap.py avocado/core/plugins/ || true
link_vt:
test -f ../avocado-vt/etc/avocado/conf.d/virt-test.conf && ln -s ../../../../avocado-vt/etc/avocado/conf.d/virt-test.conf etc/avocado/conf.d/ || true
test -f ../avocado-vt/avocado/core/plugins/virt_test.py && ln -s ../../../../avocado-vt/avocado/core/plugins/virt_test.py avocado/core/plugins/ || true
test -f ../avocado-vt/avocado/core/plugins/virt_test_list.py && ln -s ../../../../avocado-vt/avocado/core/plugins/virt_test_list.py avocado/core/plugins/ || true
test -f ../avocado-vt/etc/avocado/conf.d/virt-test.conf && ln -sf ../../../../avocado-vt/etc/avocado/conf.d/virt-test.conf etc/avocado/conf.d/ || true
test -f ../avocado-vt/avocado/core/plugins/virt_test.py && ln -sf ../../../../avocado-vt/avocado/core/plugins/virt_test.py avocado/core/plugins/ || true
test -f ../avocado-vt/avocado/core/plugins/virt_test_list.py && ln -sf ../../../../avocado-vt/avocado/core/plugins/virt_test_list.py avocado/core/plugins/ || true
test -f ../avocado-vt/avocado/core/plugins/virt_test_bootstrap.py && ln -sf ../../../../avocado-vt/avocado/core/plugins/virt_test_bootstrap.py avocado/core/plugins/ || true
test -d ../avocado-vt/virttest && ln -sf ../avocado-vt/virttest . || true
man: man/avocado.1 man/avocado-rest-client.1
......
#!/usr/bin/python
"""
A class and functions used for running and controlling child processes.
:copyright: 2008-2009 Red Hat Inc.
"""
import os
import sys
import pty
import select
import termios
import fcntl
import tempfile
import logging
import shutil
# ATTENTION: Do not import avocado libraries in this side of the aexpect
# module. This side of the module will be executed stand alone and will not
# have access to the avocado libraries. If you do that, the server won't
# start, making the client to wait indefinitely for the server and creating
# a deadlock. Import avocado libs if necessary below, where the other avocado
# imports are defined.
BASE_DIR = os.environ.get('TMPDIR', '/tmp')
# If you want to debug problems with your aexpect instances, setting
# DEBUG = True will leave the temporary files created by aexpect around
# so you can look at them.
DEBUG = False
# The following helper functions are shared by the server and the client.
def _lock(filename):
if not os.path.exists(filename):
open(filename, "w").close()
fd = os.open(filename, os.O_RDWR)
fcntl.lockf(fd, fcntl.LOCK_EX)
return fd
def _unlock(fd):
fcntl.lockf(fd, fcntl.LOCK_UN)
os.close(fd)
def _locked(filename):
try:
fd = os.open(filename, os.O_RDWR)
except Exception:
return False
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except Exception:
os.close(fd)
return True
fcntl.lockf(fd, fcntl.LOCK_UN)
os.close(fd)
return False
def _wait(filename):
    """
    Block until the exclusive lock on *filename* is released.

    Implemented by taking the lock (which waits for the holder to drop it)
    and releasing it again immediately.
    """
    _unlock(_lock(filename))
def _makeraw(shell_fd):
attr = termios.tcgetattr(shell_fd)
attr[0] &= ~(termios.IGNBRK | termios.BRKINT | termios.PARMRK |
termios.ISTRIP | termios.INLCR | termios.IGNCR |
termios.ICRNL | termios.IXON)
attr[1] &= ~termios.OPOST
attr[2] &= ~(termios.CSIZE | termios.PARENB)
attr[2] |= termios.CS8
attr[3] &= ~(termios.ECHO | termios.ECHONL | termios.ICANON |
termios.ISIG | termios.IEXTEN)
termios.tcsetattr(shell_fd, termios.TCSANOW, attr)
def _makestandard(shell_fd, echo):
attr = termios.tcgetattr(shell_fd)
attr[0] &= ~termios.INLCR
attr[0] &= ~termios.ICRNL
attr[0] &= ~termios.IGNCR
attr[1] &= ~termios.OPOST
if echo:
attr[3] |= termios.ECHO
else:
attr[3] &= ~termios.ECHO
termios.tcsetattr(shell_fd, termios.TCSANOW, attr)
def _get_filenames(base_dir, a_id):
return [os.path.join(base_dir, 'aexpect_%s' % a_id, s) for s in
"shell-pid", "status", "output", "inpipe", "ctrlpipe",
"lock-server-running", "lock-client-starting",
"server-log"]
def _get_reader_filename(base_dir, a_id, reader):
return os.path.join(base_dir, 'aexpect_%s' % a_id, "outpipe-%s" % reader)
# The following is the server part of the module.
if __name__ == "__main__":
    # NOTE(review): this half of the file predates Python 3 -- os.write()
    # with str buffers and the integer division below assume Python 2;
    # confirm the interpreter before porting.

    # Handshake: the client (Spawn.__init__) writes four lines on our stdin.
    a_id = sys.stdin.readline().strip()
    echo = sys.stdin.readline().strip() == "True"
    readers = sys.stdin.readline().strip().split(",")
    # The appended no-op echo of a_id makes the shell command line unique,
    # so the process can be identified later.
    command = sys.stdin.readline().strip() + " && echo %s > /dev/null" % a_id

    # Define filenames to be used for communication
    (shell_pid_filename,
     status_filename,
     output_filename,
     inpipe_filename,
     ctrlpipe_filename,
     lock_server_running_filename,
     lock_client_starting_filename,
     log_filename) = _get_filenames(BASE_DIR, a_id)

    logging_format = '%(asctime)s %(levelname)-5.5s| %(message)s'
    date_format = '%m/%d %H:%M:%S'
    logging.basicConfig(filename=log_filename, level=logging.DEBUG,
                        format=logging_format, datefmt=date_format)
    server_log = logging.getLogger()
    server_log.info('Server %s starting with parameters:' % str(a_id))
    server_log.info('echo: %s' % str(echo))
    server_log.info('readers: %s' % str(readers))
    server_log.info('command: %s' % str(command))

    # Populate the reader filenames list
    reader_filenames = [_get_reader_filename(BASE_DIR, a_id, reader)
                        for reader in readers]

    # Set $TERM = dumb
    os.putenv("TERM", "dumb")

    server_log.info('Forking child process for command')
    (shell_pid, shell_fd) = pty.fork()
    if shell_pid == 0:
        # Child process: run the command in a subshell
        if len(command) > 255:
            # Long command lines can exceed exec limits, so the command is
            # written to a script which bash then sources.
            new_stack = None
            if len(command) > 2000000:
                # Stack size would probably not suffice (and no open files)
                # (1 + len(command) * 4 / 8290304) * 8196
                # 2MB => 8196kb, 4MB => 16392, ...
                new_stack = (1 + len(command) / 2072576) * 8196
                command = "ulimit -s %s\nulimit -n 819200\n%s" % (new_stack,
                                                                  command)
            # NOTE(review): _get_filenames() puts session files under
            # 'aexpect_%s' % a_id, while this uses the bare a_id -- confirm
            # this directory actually exists before mktemp() is called.
            tmp_dir = os.path.join(BASE_DIR, a_id)
            tmp_file = tempfile.mktemp(suffix='.sh',
                                       prefix='aexpect-', dir=tmp_dir)
            fd_cmd = open(tmp_file, "w")
            fd_cmd.write(command)
            fd_cmd.close()
            os.execv("/bin/bash", ["/bin/bash", "-c", "source %s" % tmp_file])
            # NOTE(review): execv() does not return on success, so this
            # cleanup is unreachable and the temporary script is left behind.
            os.remove(tmp_file)
        else:
            os.execv("/bin/bash", ["/bin/bash", "-c", command])
    else:
        # Parent process
        server_log.info('Acquiring server lock on %s' % lock_server_running_filename)
        # Held for the server's whole lifetime; clients poll it via _locked()
        # to learn whether the process is still alive.
        lock_server_running = _lock(lock_server_running_filename)

        # Set terminal echo on/off and disable pre- and post-processing
        _makestandard(shell_fd, echo)

        server_log.info('Opening output file %s' % output_filename)
        output_file = open(output_filename, "w")
        server_log.info('Opening input pipe %s' % inpipe_filename)
        os.mkfifo(inpipe_filename)
        inpipe_fd = os.open(inpipe_filename, os.O_RDWR)
        server_log.info('Opening control pipe %s' % ctrlpipe_filename)
        os.mkfifo(ctrlpipe_filename)
        ctrlpipe_fd = os.open(ctrlpipe_filename, os.O_RDWR)
        # Open output pipes (readers)
        reader_fds = []
        for filename in reader_filenames:
            server_log.info('Opening output pipe %s' % filename)
            os.mkfifo(filename)
            reader_fds.append(os.open(filename, os.O_RDWR))
        server_log.info('Reader fd list: %s' % reader_fds)

        # Write shell PID to file
        server_log.info('Writing shell PID file %s' % shell_pid_filename)
        fileobj = open(shell_pid_filename, "w")
        fileobj.write(str(shell_pid))
        fileobj.close()

        # Print something to stdout so the client can start working
        print("Server %s ready" % a_id)
        sys.stdout.flush()

        # Initialize buffers
        # One pending-output buffer per reader pipe.
        buffers = ["" for reader in readers]

        # Read from child and write to files/pipes
        server_log.info('Entering main read loop')
        while True:
            check_termination = False
            # Make a list of reader pipes whose buffers are not empty
            fds = [fd for (i, fd) in enumerate(reader_fds) if buffers[i]]
            # Wait until there's something to do
            r, w, x = select.select([shell_fd, inpipe_fd, ctrlpipe_fd],
                                    fds, [], 0.5)
            # If a reader pipe is ready for writing --
            for (i, fd) in enumerate(reader_fds):
                if fd in w:
                    bytes_written = os.write(fd, buffers[i])
                    buffers[i] = buffers[i][bytes_written:]
            if ctrlpipe_fd in r:
                # Control messages are length-prefixed with a 10-char field
                # (see Spawn.send_ctrl()); payload switches the tty mode.
                cmd_len = int(os.read(ctrlpipe_fd, 10))
                data = os.read(ctrlpipe_fd, cmd_len)
                if data == "raw":
                    _makeraw(shell_fd)
                elif data == "standard":
                    _makestandard(shell_fd, echo)
            # If there's data to read from the child process --
            if shell_fd in r:
                try:
                    data = os.read(shell_fd, 16384)
                except OSError:
                    data = ""
                if not data:
                    check_termination = True
                # Remove carriage returns from the data -- they often cause
                # trouble and are normally not needed
                data = data.replace("\r", "")
                output_file.write(data)
                output_file.flush()
                for i in range(len(readers)):
                    buffers[i] += data
            # If os.read() raised an exception or there was nothing to read --
            if check_termination or shell_fd not in r:
                # Reap the shell without blocking; leave the loop once it has
                # really exited.
                pid, status = os.waitpid(shell_pid, os.WNOHANG)
                if pid:
                    status = os.WEXITSTATUS(status)
                    break
            # If there's data to read from the client --
            if inpipe_fd in r:
                data = os.read(inpipe_fd, 1024)
                os.write(shell_fd, data)

        server_log.info('Out of the main read loop. Writing status to %s' % status_filename)
        fileobj = open(status_filename, "w")
        fileobj.write(str(status))
        fileobj.close()

        # Wait for the client to finish initializing
        _wait(lock_client_starting_filename)

        # Close all files and pipes
        output_file.close()
        os.close(inpipe_fd)
        server_log.info('Closed input pipe')
        for fd in reader_fds:
            os.close(fd)
            server_log.info('Closed reader fd %s' % fd)

        _unlock(lock_server_running)
        server_log.info('Exiting normally')
        sys.exit(0)
# The following is the client part of the module.
import time
import signal
import re
import threading
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
from ..utils import astring
from ..utils import data_factory
from ..utils import process
from ..utils import genio
from ..utils import wait
from ..utils import path as utils_path
class ExpectError(Exception):

    """Base class for errors raised while waiting for patterns in output."""

    def __init__(self, patterns, output):
        Exception.__init__(self, patterns, output)
        self.patterns = patterns
        self.output = output

    def _pattern_str(self):
        # Singular/plural wording depending on how many patterns were given.
        if len(self.patterns) == 1:
            return "pattern %r" % self.patterns[0]
        return "patterns %r" % self.patterns

    def __str__(self):
        msg = "Unknown error occurred while looking for %s (output: %r)"
        return msg % (self._pattern_str(), self.output)
class ExpectTimeoutError(ExpectError):

    """Raised when the timeout expires before any pattern matches."""

    def __str__(self):
        msg = "Timeout expired while looking for %s (output: %r)"
        return msg % (self._pattern_str(), self.output)
class ExpectProcessTerminatedError(ExpectError):

    """Raised when the child process exits while patterns are awaited."""

    def __init__(self, patterns, status, output):
        ExpectError.__init__(self, patterns, output)
        self.status = status

    def __str__(self):
        msg = ("Process terminated while looking for %s "
               "(status: %s, output: %r)")
        return msg % (self._pattern_str(), self.status, self.output)
class ShellError(Exception):

    """Base class for errors while running a command in a shell session."""

    def __init__(self, cmd, output):
        Exception.__init__(self, cmd, output)
        self.cmd = cmd
        self.output = output

    def __str__(self):
        msg = "Could not execute shell command %r (output: %r)"
        return msg % (self.cmd, self.output)
class ShellTimeoutError(ShellError):

    """Raised when a shell command does not finish within the timeout."""

    def __str__(self):
        msg = ("Timeout expired while waiting for shell command to "
               "complete: %r (output: %r)")
        return msg % (self.cmd, self.output)
class ShellProcessTerminatedError(ShellError):

    """
    Raised when the shell process itself (e.g. ssh, netcat, telnet)
    terminates unexpectedly while a command is in flight.
    """

    def __init__(self, cmd, status, output):
        ShellError.__init__(self, cmd, output)
        self.status = status

    def __str__(self):
        msg = ("Shell process terminated while waiting for command to "
               "complete: %r (status: %s, output: %r)")
        return msg % (self.cmd, self.status, self.output)
class ShellCmdError(ShellError):

    """
    Raised when a command executed in a shell terminates with a nonzero
    exit code (status).
    """

    def __init__(self, cmd, status, output):
        ShellError.__init__(self, cmd, output)
        self.status = status

    def __str__(self):
        msg = "Shell command failed: %r (status: %s, output: %r)"
        return msg % (self.cmd, self.status, self.output)
class ShellStatusError(ShellError):

    """Raised when the command's exit status cannot be obtained."""

    def __str__(self):
        msg = "Could not get exit status of command: %r (output: %r)"
        return msg % (self.cmd, self.output)
def run_tail(command, termination_func=None, output_func=None, output_prefix="",
             timeout=1.0, auto_close=True):
    """
    Run a subprocess in the background and collect its output and exit status.

    Run command as a subprocess. Call output_func with each line of output
    from the subprocess (prefixed by output_prefix). Call termination_func
    when the subprocess terminates. Return when timeout expires or when the
    subprocess exits -- whichever occurs first.

    :param command: The shell command to execute
    :param termination_func: A function to call when the process terminates
            (should take an integer exit status parameter)
    :param output_func: A function to call with each line of output from
            the subprocess (should take a string parameter)
    :param output_prefix: A string to pre-pend to each line of the output,
            before passing it to stdout_func
    :param timeout: Time duration (in seconds) to wait for the subprocess to
            terminate before returning
    :param auto_close: If True, close() the instance automatically when its
            reference count drops to zero (default True).
    :return: A Tail object.
    """
    # Named tail_process so the module-level 'process' import is not shadowed.
    tail_process = Tail(command=command,
                        termination_func=termination_func,
                        output_func=output_func,
                        output_prefix=output_prefix,
                        auto_close=auto_close)

    end_time = time.time() + timeout
    while time.time() < end_time and tail_process.is_alive():
        time.sleep(0.1)

    return tail_process
def run_bg(command, termination_func=None, output_func=None, output_prefix="",
           timeout=1.0, auto_close=True):
    """
    Run a subprocess in the background and collect its output and exit status.

    Run command as a subprocess. Call output_func with each line of output
    from the subprocess (prefixed by output_prefix). Call termination_func
    when the subprocess terminates. Return when timeout expires or when the
    subprocess exits -- whichever occurs first.

    :param command: The shell command to execute
    :param termination_func: A function to call when the process terminates
            (should take an integer exit status parameter)
    :param output_func: A function to call with each line of output from
            the subprocess (should take a string parameter)
    :param output_prefix: A string to pre-pend to each line of the output,
            before passing it to stdout_func
    :param timeout: Time duration (in seconds) to wait for the subprocess to
            terminate before returning
    :param auto_close: If True, close() the instance automatically when its
            reference count drops to zero (default True).
    :return: An Expect object.
    """
    # Named bg_process so the module-level 'process' import is not shadowed.
    bg_process = Expect(command=command,
                        termination_func=termination_func,
                        output_func=output_func,
                        output_prefix=output_prefix,
                        auto_close=auto_close)

    end_time = time.time() + timeout
    while time.time() < end_time and bg_process.is_alive():
        time.sleep(0.1)

    return bg_process
def run_fg(command, output_func=None, output_prefix="", timeout=1.0):
    """
    Run a subprocess in the foreground and collect its output and exit status.

    Run command as a subprocess. Call output_func with each line of output
    from the subprocess (prefixed by prefix). Return when timeout expires or
    when the subprocess exits -- whichever occurs first. If timeout expires
    and the subprocess is still running, kill it before returning.

    :param command: The shell command to execute
    :param output_func: A function to call with each line of output from
            the subprocess (should take a string parameter)
    :param output_prefix: A string to pre-pend to each line of the output,
            before passing it to stdout_func
    :param timeout: Time duration (in seconds) to wait for the subprocess to
            terminate before killing it and returning
    :return: A 2-tuple containing the exit status of the process and its
            STDOUT/STDERR output.  If timeout expires before the process
            terminates, the returned status is None.
    """
    # Named fg_process so the module-level 'process' import is not shadowed.
    fg_process = run_bg(command, None, output_func, output_prefix, timeout)
    output = fg_process.get_output()
    if fg_process.is_alive():
        status = None
    else:
        status = fg_process.get_status()
    fg_process.close()
    return (status, output)
class Spawn(object):

    """
    This class is used for spawning and controlling a child process.

    A new instance of this class can either run a new server (a small Python
    program that reads output from the child process and reports it to the
    client and to a text file) or attach to an already running server.
    When a server is started it runs the child process.
    The server writes output from the child's STDOUT and STDERR to a text
    file.  The text file can be accessed at any time using get_output().
    In addition, the server opens as many pipes as requested by the client
    and writes the output to them.
    The pipes are requested and accessed by classes derived from Spawn.
    These pipes are referred to as "readers".
    The server also receives input from the client and sends it to the child
    process.

    An instance of this class can be pickled.  Every derived class is
    responsible for restoring its own state by properly defining
    __getinitargs__().

    The first named pipe is used by _tail(), a function that runs in the
    background and reports new output from the child as it is produced.
    The second named pipe is used by a set of functions that read and parse
    output as requested by the user in an interactive manner, similar to
    pexpect.
    When unpickled it automatically resumes _tail() if needed.
    """

    def __init__(self, command=None, a_id=None, auto_close=False, echo=False,
                 linesep="\n"):
        """
        Initialize the class and run command as a child process.

        :param command: Command to run, or None if accessing an already
                running server.
        :param a_id: ID of an already running server, if accessing a running
                server, or None if starting a new one.
        :param auto_close: If True, close() the instance automatically when
                its reference count drops to zero (default False).
        :param echo: Boolean indicating whether echo should be initially
                enabled for the pseudo terminal running the subprocess.  This
                parameter has an effect only when starting a new server.
        :param linesep: Line separator to be appended to strings sent to the
                child process by sendline().
        """
        self.a_id = a_id or data_factory.generate_random_string(8)
        self.log_file = None

        base_dir = os.path.join(BASE_DIR, 'aexpect_%s' % self.a_id)

        # Define filenames for communication with server
        try:
            utils_path.init_dir(base_dir)
        except Exception:
            # Best effort: the directory may already exist.
            pass

        (self.shell_pid_filename,
         self.status_filename,
         self.output_filename,
         self.inpipe_filename,
         self.ctrlpipe_filename,
         self.lock_server_running_filename,
         self.lock_client_starting_filename,
         self.server_log_filename) = _get_filenames(BASE_DIR,
                                                    self.a_id)

        self.command = command

        # Remember some attributes
        self.auto_close = auto_close
        self.echo = echo
        self.linesep = linesep

        # Make sure the 'readers' and 'close_hooks' attributes exist
        # (derived classes populate them via _add_reader()/_add_close_hook()
        # *before* calling this constructor).
        if not hasattr(self, "readers"):
            self.readers = []
        if not hasattr(self, "close_hooks"):
            self.close_hooks = []

        # Define the reader filenames
        self.reader_filenames = dict(
            (reader, _get_reader_filename(BASE_DIR, self.a_id, reader))
            for reader in self.readers)

        # Let the server know a client intends to open some pipes;
        # if the executed command terminates quickly, the server will wait for
        # the client to release the lock before exiting
        lock_client_starting = _lock(self.lock_client_starting_filename)

        # Start the server (which runs the command)
        if command:
            # Re-execute this very module; its __main__ section is the server.
            sub = subprocess.Popen("%s %s" % (sys.executable, __file__),
                                   shell=True,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
            # Send parameters to the server (4-line protocol read by the
            # server's stdin handshake)
            sub.stdin.write("%s\n" % self.a_id)
            sub.stdin.write("%s\n" % echo)
            sub.stdin.write("%s\n" % ",".join(self.readers))
            sub.stdin.write("%s\n" % command)
            # Wait for the server to complete its initialization
            while "Server %s ready" % self.a_id not in sub.stdout.readline():
                pass

        # Open the reading pipes
        self.reader_fds = {}
        try:
            # The server must still be alive (it holds this lock) for the
            # pipes to be worth opening.
            assert(_locked(self.lock_server_running_filename))
            for reader, filename in self.reader_filenames.items():
                self.reader_fds[reader] = os.open(filename, os.O_RDONLY)
        except Exception:
            pass

        # Allow the server to continue
        _unlock(lock_client_starting)

    # The following two functions are defined to make sure the state is set
    # exclusively by the constructor call as specified in __getinitargs__().

    def __reduce__(self):
        return self.__class__, (self.__getinitargs__())

    def __getstate__(self):
        pass

    def __setstate__(self, state):
        pass

    def __getinitargs__(self):
        # Save some information when pickling -- will be passed to the
        # constructor upon unpickling
        return (None, self.a_id, self.auto_close, self.echo, self.linesep)

    def __del__(self):
        self._close_reader_fds()
        if self.auto_close:
            self.close()

    def _add_reader(self, reader):
        """
        Add a reader whose file descriptor can be obtained with _get_fd().

        Should be called before __init__().  Intended for use by derived
        classes.

        :param reader: The name of the reader.
        """
        if not hasattr(self, "readers"):
            self.readers = []
        self.readers.append(reader)

    def _add_close_hook(self, hook):
        """
        Add a close hook function to be called when close() is called.

        The function will be called after the process terminates but before
        final cleanup.  Intended for use by derived classes.

        :param hook: The hook function.
        """
        if not hasattr(self, "close_hooks"):
            self.close_hooks = []
        self.close_hooks.append(hook)

    def _get_fd(self, reader):
        """
        Return an open file descriptor corresponding to the specified reader
        pipe.  If no such reader exists, or the pipe could not be opened,
        return None.  Intended for use by derived classes.

        :param reader: The name of the reader.
        """
        return self.reader_fds.get(reader)

    def _close_reader_fds(self):
        """
        Close all reader file descriptors.
        """
        for fd in self.reader_fds.values():
            try:
                os.close(fd)
            except OSError:
                # It was probably already closed
                pass

    def get_id(self):
        """
        Return the instance's a_id attribute, which may be used to access the
        process in the future.
        """
        return self.a_id

    def get_pid(self):
        """
        Return the PID of the process.

        Note: this may be the PID of the shell process running the user given
        command.
        """
        try:
            fileobj = open(self.shell_pid_filename, "r")
            pid = int(fileobj.read())
            fileobj.close()
            return pid
        except Exception:
            return None

    def get_status(self):
        """
        Wait for the process to exit and return its exit status, or None
        if the exit status is not available.
        """
        _wait(self.lock_server_running_filename)
        try:
            fileobj = open(self.status_filename, "r")
            status = int(fileobj.read())
            fileobj.close()
            return status
        except Exception:
            return None

    def get_output(self):
        """
        Return the STDOUT and STDERR output of the process so far.
        """
        try:
            fileobj = open(self.output_filename, "r")
            output = fileobj.read()
            fileobj.close()
            return output
        except Exception:
            return ""

    def get_stripped_output(self):
        """
        Return the STDOUT and STDERR output without the console codes escape
        and sequences of the process so far.
        """
        return astring.strip_console_codes(self.get_output())

    def is_alive(self):
        """
        Return True if the process is running.
        """
        # The server holds this lock for its whole lifetime.
        return _locked(self.lock_server_running_filename)

    def is_defunct(self):
        """
        Return True if the process is defunct (zombie).
        """
        return process.process_in_ptree_is_defunct(self.get_pid())

    def kill(self, sig=signal.SIGKILL):
        """
        Kill the child process if alive
        """
        # Kill it if it's alive
        if self.is_alive():
            process.kill_process_tree(self.get_pid(), sig)

    def close(self, sig=signal.SIGKILL):
        """
        Kill the child process if it's alive and remove temporary files.

        :param sig: The signal to send the process when attempting to kill it.
        """
        self.kill(sig=sig)
        # Wait for the server to exit
        _wait(self.lock_server_running_filename)
        # Call all cleanup routines
        for hook in self.close_hooks:
            hook(self)
        # Close reader file descriptors
        self._close_reader_fds()
        self.reader_fds = {}
        # Remove all used files
        if not DEBUG:
            shutil.rmtree(os.path.join(BASE_DIR, 'aexpect_%s' % self.a_id))

    def set_linesep(self, linesep):
        """
        Sets the line separator string (usually "\\n").

        :param linesep: Line separator string.
        """
        self.linesep = linesep

    def send(self, cont=""):
        """
        Send a string to the child process.

        :param cont: String to send to the child process.
        """
        try:
            fd = os.open(self.inpipe_filename, os.O_RDWR)
            os.write(fd, cont)
            os.close(fd)
        except Exception:
            # Best effort: the server may already be gone.
            pass

    def sendline(self, cont=""):
        """
        Send a string followed by a line separator to the child process.

        :param cont: String to send to the child process.
        """
        self.send(cont + self.linesep)

    def send_ctrl(self, control_str=""):
        """
        Send a control string to the aexpect process.

        :param control_str: Control string to send to the child process
                container.
        """
        try:
            fd = os.open(self.ctrlpipe_filename, os.O_RDWR)
            # Length-prefixed protocol: 10-char decimal length, then payload
            # (matches the server's ctrlpipe read).
            os.write(fd, "%10d%s" % (len(control_str), control_str))
            os.close(fd)
        except Exception:
            pass
# Flag polled by Tail._tail() loops; setting it asks them to exit.
_thread_kill_requested = False


def kill_tail_threads():
    """
    Kill all Tail threads.

    After calling this function no new threads should be started.
    """
    global _thread_kill_requested
    _thread_kill_requested = True

    # Wait (up to 10s each) for every tail thread to notice the flag.
    for thread in threading.enumerate():
        if hasattr(thread, "name") and thread.name.startswith("tail_thread"):
            thread.join(10)
    _thread_kill_requested = False
class Tail(Spawn):

    """
    This class runs a child process in the background and sends its output in
    real time, line-by-line, to a callback function.

    See Spawn's docstring.

    This class uses a single pipe reader to read data in real time from the
    child process and report it to a given callback function.
    When the child process exits, its exit status is reported to an
    additional callback function.

    When this class is unpickled, it automatically resumes reporting output.
    """

    def __init__(self, command=None, a_id=None, auto_close=False, echo=False,
                 linesep="\n", termination_func=None, termination_params=(),
                 output_func=None, output_params=(), output_prefix="",
                 thread_name=None):
        """
        Initialize the class and run command as a child process.

        :param command: Command to run, or None if accessing an already
                running server.
        :param a_id: ID of an already running server, if accessing a running
                server, or None if starting a new one.
        :param auto_close: If True, close() the instance automatically when
                its reference count drops to zero (default False).
        :param echo: Boolean indicating whether echo should be initially
                enabled for the pseudo terminal running the subprocess.  This
                parameter has an effect only when starting a new server.
        :param linesep: Line separator to be appended to strings sent to the
                child process by sendline().
        :param termination_func: Function to call when the process exits.  The
                function must accept a single exit status parameter.
        :param termination_params: Parameters to send to termination_func
                before the exit status.
        :param output_func: Function to call whenever a line of output is
                available from the STDOUT or STDERR streams of the process.
                The function must accept a single string parameter.  The
                string does not include the final newline.
        :param output_params: Parameters to send to output_func before the
                output line.
        :param output_prefix: String to prepend to lines sent to output_func.
        :param thread_name: Name of thread to better identify hanging threads.
        """
        # Add a reader and a close hook (must happen before Spawn.__init__,
        # which consumes self.readers/self.close_hooks)
        self._add_reader("tail")
        self._add_close_hook(Tail._join_thread)
        self._add_close_hook(Tail._close_log_file)

        # Init the superclass
        Spawn.__init__(self, command, a_id, auto_close, echo, linesep)
        if thread_name is None:
            # The "tail_thread" prefix is what kill_tail_threads() matches on.
            self.thread_name = ("tail_thread_%s_%s") % (self.a_id,
                                                        str(command)[:10])
        else:
            self.thread_name = thread_name

        # Remember some attributes
        self.termination_func = termination_func
        self.termination_params = termination_params
        self.output_func = output_func
        self.output_params = output_params
        self.output_prefix = output_prefix

        # Start the thread in the background
        self.tail_thread = None
        if termination_func or output_func:
            self._start_thread()

    def __reduce__(self):
        return self.__class__, (self.__getinitargs__())

    def __getinitargs__(self):
        return Spawn.__getinitargs__(self) + (self.termination_func,
                                              self.termination_params,
                                              self.output_func,
                                              self.output_params,
                                              self.output_prefix,
                                              self.thread_name)

    def set_termination_func(self, termination_func):
        """
        Set the termination_func attribute. See __init__() for details.

        :param termination_func: Function to call when the process terminates.
                Must take a single parameter -- the exit status.
        """
        self.termination_func = termination_func
        if termination_func and not self.tail_thread:
            self._start_thread()

    def set_termination_params(self, termination_params):
        """
        Set the termination_params attribute. See __init__() for details.

        :param termination_params: Parameters to send to termination_func
                before the exit status.
        """
        self.termination_params = termination_params

    def set_output_func(self, output_func):
        """
        Set the output_func attribute. See __init__() for details.

        :param output_func: Function to call for each line of STDOUT/STDERR
                output from the process.  Must take a single string parameter.
        """
        self.output_func = output_func
        if output_func and not self.tail_thread:
            self._start_thread()

    def set_output_params(self, output_params):
        """
        Set the output_params attribute. See __init__() for details.

        :param output_params: Parameters to send to output_func before the
                output line.
        """
        self.output_params = output_params

    def set_output_prefix(self, output_prefix):
        """
        Set the output_prefix attribute. See __init__() for details.

        :param output_prefix: String to pre-pend to each line sent to
                output_func (see set_output_callback()).
        """
        self.output_prefix = output_prefix

    def set_log_file(self, filename):
        """
        Set a log file name for this tail instance.

        :param filename: Base name of the log.
        """
        self.log_file = filename

    def _close_log_file(self):
        # Close hook registered in __init__()
        if self.log_file is not None:
            genio.close_log_file(self.log_file)

    def _tail(self):
        # Runs in a background thread; pumps the "tail" reader pipe through
        # output_func line by line and reports the exit status at the end.
        def print_line(text):
            # Pre-pend prefix and remove trailing whitespace
            text = self.output_prefix + text.rstrip()
            # Pass text to output_func
            try:
                params = self.output_params + (text,)
                self.output_func(*params)
            except TypeError:
                pass

        try:
            fd = self._get_fd("tail")
            bfr = ""
            while True:
                global _thread_kill_requested
                if _thread_kill_requested:
                    # kill_tail_threads() asked us to stop.
                    try:
                        os.close(fd)
                    except Exception:
                        pass
                    return
                try:
                    # See if there's any data to read from the pipe
                    r, w, x = select.select([fd], [], [], 0.05)
                except Exception:
                    break
                if fd in r:
                    # Some data is available; read it
                    new_data = os.read(fd, 1024)
                    if not new_data:
                        break
                    bfr += new_data
                    # Send the output to output_func line by line
                    # (except for the last line)
                    if self.output_func:
                        lines = bfr.split("\n")
                        for line in lines[:-1]:
                            print_line(line)
                    # Leave only the last line
                    last_newline_index = bfr.rfind("\n")
                    bfr = bfr[last_newline_index + 1:]
                else:
                    # No output is available right now; flush the bfr
                    if bfr:
                        print_line(bfr)
                        bfr = ""
            # The process terminated; print any remaining output
            if bfr:
                print_line(bfr)
            # Get the exit status, print it and send it to termination_func
            status = self.get_status()
            if status is None:
                return
            print_line("(Process terminated with status %s)" % status)
            try:
                params = self.termination_params + (status,)
                self.termination_func(*params)
            except TypeError:
                pass
        finally:
            self.tail_thread = None

    def _start_thread(self):
        self.tail_thread = threading.Thread(target=self._tail,
                                            name=self.thread_name)
        self.tail_thread.start()

    def _join_thread(self):
        # Wait for the tail thread to exit
        # (it's done this way because self.tail_thread may become None at any
        # time)
        t = self.tail_thread
        if t:
            t.join()
class Expect(Tail):

    """
    This class runs a child process in the background and provides expect-like
    services.

    It also provides all of Tail's functionality.
    """

    def __init__(self, command=None, a_id=None, auto_close=True, echo=False,
                 linesep="\n", termination_func=None, termination_params=(),
                 output_func=None, output_params=(), output_prefix="",
                 thread_name=None):
        """
        Initialize the class and run command as a child process.

        :param command: Command to run, or None if accessing an already running
                server.
        :param a_id: ID of an already running server, if accessing a running
                server, or None if starting a new one.
        :param auto_close: If True, close() the instance automatically when its
                reference count drops to zero (default True).
        :param echo: Boolean indicating whether echo should be initially
                enabled for the pseudo terminal running the subprocess. This
                parameter has an effect only when starting a new server.
        :param linesep: Line separator to be appended to strings sent to the
                child process by sendline().
        :param termination_func: Function to call when the process exits. The
                function must accept a single exit status parameter.
        :param termination_params: Parameters to send to termination_func
                before the exit status.
        :param output_func: Function to call whenever a line of output is
                available from the STDOUT or STDERR streams of the process.
                The function must accept a single string parameter. The string
                does not include the final newline.
        :param output_params: Parameters to send to output_func before the
                output line.
        :param output_prefix: String to prepend to lines sent to output_func.
        :param thread_name: Name for the output-tailing thread (passed through
                to Tail).
        """
        # Add a reader
        self._add_reader("expect")

        # Init the superclass
        Tail.__init__(self, command, a_id, auto_close, echo, linesep,
                      termination_func, termination_params,
                      output_func, output_params, output_prefix, thread_name)

    def __reduce__(self):
        # Pickling support: recreate the instance from its init arguments.
        return self.__class__, (self.__getinitargs__())

    def __getinitargs__(self):
        return Tail.__getinitargs__(self)

    def read_nonblocking(self, internal_timeout=None, timeout=None):
        """
        Read from child until there is nothing to read for timeout seconds.

        :param internal_timeout: Time (seconds) to wait before we give up
                                 reading from the child process, or None to
                                 use the default value.
        :param timeout: Timeout for reading child process output.
        :return: The data read so far (a string, possibly empty).
        """
        if internal_timeout is None:
            internal_timeout = 0.1
        end_time = None
        if timeout:
            end_time = time.time() + timeout
        fd = self._get_fd("expect")
        data = ""
        while True:
            try:
                r, w, x = select.select([fd], [], [], internal_timeout)
            except Exception:
                # select() failing (e.g. closed fd) ends the read; return
                # whatever was collected so far.
                return data
            if fd in r:
                new_data = os.read(fd, 1024)
                if not new_data:
                    # Zero-byte read means EOF.
                    return data
                data += new_data
            else:
                # Nothing arrived within internal_timeout; we are done.
                return data
            if end_time and time.time() > end_time:
                # Overall timeout expired.
                return data

    def match_patterns(self, cont, patterns):
        """
        Match cont against a list of patterns.

        Return the index of the first pattern that matches a substring of cont.
        None and empty strings in patterns are ignored.
        If no match is found, return None.

        :param cont: input string
        :param patterns: List of strings (regular expression patterns).
        """
        for i in range(len(patterns)):
            if not patterns[i]:
                continue
            if re.search(patterns[i], cont):
                return i
        # Implicitly returns None when no pattern matched.

    def match_patterns_multiline(self, cont, patterns):
        """
        Match list of lines against a list of patterns.

        Return the index of the first pattern that matches a substring of cont.
        None and empty strings in patterns are ignored.
        If no match is found, return None.

        :param cont: List of strings (input strings)
        :param patterns: List of strings (regular expression patterns). The
                         pattern priority is from the last to first.
        """
        # Negative indices iterate patterns from last to first; the returned
        # index is negative but still indexes the original patterns list.
        for i in range(-len(patterns), 0):
            if not patterns[i]:
                continue
            for line in cont:
                if re.search(patterns[i], line):
                    return i

    def read_until_output_matches(self, patterns, filter_func=lambda x: x,
                                  timeout=60, internal_timeout=None,
                                  print_func=None, match_func=None):
        """
        Read from child using read_nonblocking until a pattern matches.

        Read using read_nonblocking until a match is found using match_patterns,
        or until timeout expires. Before attempting to search for a match, the
        data is filtered using the filter_func function provided.

        :param patterns: List of strings (regular expression patterns)
        :param filter_func: Function to apply to the data read from the child
                before attempting to match it against the patterns (should take
                and return a string)
        :param timeout: The duration (in seconds) to wait until a match is
                found
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :param match_func: Function to compare the output and patterns.
        :return: Tuple containing the match index and the data read so far
        :raise ExpectTimeoutError: Raised if timeout expires
        :raise ExpectProcessTerminatedError: Raised if the child process
                terminates while waiting for output
        :raise ExpectError: Raised if an unknown error occurs
        """
        if not match_func:
            match_func = self.match_patterns
        fd = self._get_fd("expect")
        o = ""
        end_time = time.time() + timeout
        while True:
            try:
                # TypeError is raised here when the remaining time goes
                # negative in a way select() rejects; treated as termination.
                r, w, x = select.select([fd], [], [],
                                        max(0, end_time - time.time()))
            except (select.error, TypeError):
                break
            if not r:
                # select() timed out with no data: overall timeout expired.
                raise ExpectTimeoutError(patterns, o)
            # Read data from child
            data = self.read_nonblocking(internal_timeout,
                                         end_time - time.time())
            if not data:
                break
            # Print it if necessary
            if print_func:
                for line in data.splitlines():
                    print_func(line)
            # Look for patterns
            o += data
            match = match_func(filter_func(o), patterns)
            if match is not None:
                return match, o

        # Check if the child has terminated
        if wait.wait_for(lambda: not self.is_alive(), 5, 0, 0.1):
            raise ExpectProcessTerminatedError(patterns, self.get_status(), o)
        else:
            # This shouldn't happen
            raise ExpectError(patterns, o)

    def read_until_last_word_matches(self, patterns, timeout=60,
                                     internal_timeout=None, print_func=None):
        """
        Read using read_nonblocking until the last word of the output matches
        one of the patterns (using match_patterns), or until timeout expires.

        :param patterns: A list of strings (regular expression patterns)
        :param timeout: The duration (in seconds) to wait until a match is
                found
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :return: A tuple containing the match index and the data read so far
        :raise ExpectTimeoutError: Raised if timeout expires
        :raise ExpectProcessTerminatedError: Raised if the child process
                terminates while waiting for output
        :raise ExpectError: Raised if an unknown error occurs
        """
        def get_last_word(cont):
            # Empty output has no words; match against "" instead of raising.
            if cont:
                return cont.split()[-1]
            else:
                return ""

        return self.read_until_output_matches(patterns, get_last_word,
                                              timeout, internal_timeout,
                                              print_func)

    def read_until_last_line_matches(self, patterns, timeout=60,
                                     internal_timeout=None, print_func=None):
        """
        Read using read_nonblocking until the last non-empty line matches a
        pattern.

        Read using read_nonblocking until the last non-empty line of the output
        matches one of the patterns (using match_patterns), or until timeout
        expires. Return a tuple containing the match index (or None if no match
        was found) and the data read so far.

        :param patterns: A list of strings (regular expression patterns)
        :param timeout: The duration (in seconds) to wait until a match is
                found
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :return: A tuple containing the match index and the data read so far
        :raise ExpectTimeoutError: Raised if timeout expires
        :raise ExpectProcessTerminatedError: Raised if the child process
                terminates while waiting for output
        :raise ExpectError: Raised if an unknown error occurs
        """
        def get_last_nonempty_line(cont):
            nonempty_lines = [l for l in cont.splitlines() if l.strip()]
            if nonempty_lines:
                return nonempty_lines[-1]
            else:
                return ""

        return self.read_until_output_matches(patterns, get_last_nonempty_line,
                                              timeout, internal_timeout,
                                              print_func)

    def read_until_any_line_matches(self, patterns, timeout=60,
                                    internal_timeout=None, print_func=None):
        """
        Read using read_nonblocking until any line matches a pattern.

        Read using read_nonblocking until any line of the output matches
        one of the patterns (using match_patterns_multiline), or until timeout
        expires. Return a tuple containing the match index (or None if no match
        was found) and the data read so far.

        :param patterns: A list of strings (regular expression patterns)
                Consider using '^' in the beginning.
        :param timeout: The duration (in seconds) to wait until a match is
                found
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :return: A tuple containing the match index and the data read so far
        :raise ExpectTimeoutError: Raised if timeout expires
        :raise ExpectProcessTerminatedError: Raised if the child process
                terminates while waiting for output
        :raise ExpectError: Raised if an unknown error occurs
        """
        return self.read_until_output_matches(patterns,
                                              lambda x: x.splitlines(),
                                              timeout,
                                              internal_timeout, print_func,
                                              self.match_patterns_multiline)
class ShellSession(Expect):

    """
    This class runs a child process in the background. It is suited for
    processes that provide an interactive shell, such as SSH and Telnet.

    It provides all services of Expect and Tail. In addition, it
    provides command running services, and a utility function to test the
    process for responsiveness.
    """

    def __init__(self, command=None, a_id=None, auto_close=True, echo=False,
                 linesep="\n", termination_func=None, termination_params=(),
                 output_func=None, output_params=(), output_prefix="",
                 thread_name=None, prompt=r"[\#\$]\s*$",
                 status_test_command="echo $?"):
        """
        Initialize the class and run command as a child process.

        :param command: Command to run, or None if accessing an already running
                server.
        :param a_id: ID of an already running server, if accessing a running
                server, or None if starting a new one.
        :param auto_close: If True, close() the instance automatically when its
                reference count drops to zero (default True).
        :param echo: Boolean indicating whether echo should be initially
                enabled for the pseudo terminal running the subprocess. This
                parameter has an effect only when starting a new server.
        :param linesep: Line separator to be appended to strings sent to the
                child process by sendline().
        :param termination_func: Function to call when the process exits. The
                function must accept a single exit status parameter.
        :param termination_params: Parameters to send to termination_func
                before the exit status.
        :param output_func: Function to call whenever a line of output is
                available from the STDOUT or STDERR streams of the process.
                The function must accept a single string parameter. The string
                does not include the final newline.
        :param output_params: Parameters to send to output_func before the
                output line.
        :param output_prefix: String to prepend to lines sent to output_func.
        :param thread_name: Name for the output-tailing thread (passed through
                to Expect/Tail).
        :param prompt: Regular expression describing the shell's prompt line.
        :param status_test_command: Command to be used for getting the last
                exit status of commands run inside the shell (used by
                cmd_status_output() and friends).
        """
        # Init the superclass
        Expect.__init__(self, command, a_id, auto_close, echo, linesep,
                        termination_func, termination_params,
                        output_func, output_params, output_prefix, thread_name)

        # Remember some attributes
        self.prompt = prompt
        self.status_test_command = status_test_command

    def __reduce__(self):
        # Pickling support: recreate the instance from its init arguments.
        return self.__class__, (self.__getinitargs__())

    def __getinitargs__(self):
        return Expect.__getinitargs__(self) + (self.prompt,
                                               self.status_test_command)

    @classmethod
    def remove_command_echo(cls, cont, cmd):
        # If the first output line is exactly the echoed command, drop it
        # and keep the remaining lines (line endings preserved).
        if cont and cont.splitlines()[0] == cmd:
            cont = "".join(cont.splitlines(True)[1:])
        return cont

    @classmethod
    def remove_last_nonempty_line(cls, cont):
        # Strip trailing whitespace, then drop the final line (typically the
        # shell prompt), preserving the original line endings of the rest.
        return "".join(cont.rstrip().splitlines(True)[:-1])

    def set_prompt(self, prompt):
        """
        Set the prompt attribute for later use by read_up_to_prompt.

        :param prompt: String (regular expression) that describes the prompt
                contents.
        """
        self.prompt = prompt

    def set_status_test_command(self, status_test_command):
        """
        Set the command to be sent in order to get the last exit status.

        :param status_test_command: Command that will be sent to get the last
                exit status.
        """
        self.status_test_command = status_test_command

    def is_responsive(self, timeout=5.0):
        """
        Return True if the process responds to STDIN/terminal input.

        Send a newline to the child process (e.g. SSH or Telnet) and read some
        output using read_nonblocking().
        If all is OK, some output should be available (e.g. the shell prompt).
        In that case return True. Otherwise return False.

        :param timeout: Time duration to wait before the process is considered
                unresponsive.
        """
        # Read all output that's waiting to be read, to make sure the output
        # we read next is in response to the newline sent
        self.read_nonblocking(internal_timeout=0, timeout=timeout)
        # Send a newline
        self.sendline()
        # Wait up to timeout seconds for some output from the child
        end_time = time.time() + timeout
        while time.time() < end_time:
            time.sleep(0.5)
            if self.read_nonblocking(0, end_time - time.time()).strip():
                return True
        # No output -- report unresponsive
        return False

    def read_up_to_prompt(self, timeout=60, internal_timeout=None,
                          print_func=None):
        """
        Read using read_nonblocking until the last non-empty line matches the
        prompt.

        Read using read_nonblocking until the last non-empty line of the output
        matches the prompt regular expression set by set_prompt, or until
        timeout expires.

        :param timeout: The duration (in seconds) to wait until a match is
                found
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being
                read (should take a string parameter)
        :return: The data read so far
        :raise ExpectTimeoutError: Raised if timeout expires
        :raise ExpectProcessTerminatedError: Raised if the shell process
                terminates while waiting for output
        :raise ExpectError: Raised if an unknown error occurs
        """
        # Only the data (index 1 of the tuple) is of interest here.
        return self.read_until_last_line_matches([self.prompt], timeout,
                                                 internal_timeout,
                                                 print_func)[1]

    def cmd_output(self, cmd, timeout=60, internal_timeout=None,
                   print_func=None):
        """
        Send a command and return its output.

        :param cmd: Command to send (must not contain newline characters)
        :param timeout: The duration (in seconds) to wait for the prompt to
                return
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :return: The output of cmd
        :raise ShellTimeoutError: Raised if timeout expires
        :raise ShellProcessTerminatedError: Raised if the shell process
                terminates while waiting for output
        :raise ShellError: Raised if an unknown error occurs
        """
        logging.debug("Sending command: %s" % cmd)
        # Drain any pending output so it isn't mistaken for cmd's output.
        self.read_nonblocking(0, timeout)
        self.sendline(cmd)
        try:
            o = self.read_up_to_prompt(timeout, internal_timeout, print_func)
        except ExpectError, e:
            # Translate Expect-level errors into the equivalent Shell-level
            # errors, attaching the output collected so far.
            o = self.remove_command_echo(e.output, cmd)
            if isinstance(e, ExpectTimeoutError):
                raise ShellTimeoutError(cmd, o)
            elif isinstance(e, ExpectProcessTerminatedError):
                raise ShellProcessTerminatedError(cmd, e.status, o)
            else:
                raise ShellError(cmd, o)

        # Remove the echoed command and the final shell prompt
        return self.remove_last_nonempty_line(self.remove_command_echo(o, cmd))

    def cmd_output_safe(self, cmd, timeout=60, internal_timeout=None,
                        print_func=None):
        """
        Send a command and return its output (serial sessions).

        In serial sessions, frequently the kernel might print debug or
        error messages that make read_up_to_prompt to timeout. Let's try
        to be a little more robust and send a carriage return, to see if
        we can get to the prompt.

        :param cmd: Command to send (must not contain newline characters)
        :param timeout: The duration (in seconds) to wait for the prompt to
                return
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :return: The output of cmd
        :raise ShellTimeoutError: Raised if timeout expires
        :raise ShellProcessTerminatedError: Raised if the shell process
                terminates while waiting for output
        :raise ShellError: Raised if an unknown error occurs
        """
        logging.debug("Sending command (safe): %s" % cmd)
        # Drain any pending output so it isn't mistaken for cmd's output.
        self.read_nonblocking(0, timeout)
        self.sendline(cmd)
        o = ""
        success = False
        start_time = time.time()
        while (time.time() - start_time) < timeout:
            try:
                # Short read; on timeout, nudge the shell with a newline and
                # retry until the overall timeout is spent.
                o += self.read_up_to_prompt(0.5)
                success = True
                break
            except ExpectError, e:
                o = self.remove_command_echo(e.output, cmd)
                if isinstance(e, ExpectTimeoutError):
                    self.sendline()
                elif isinstance(e, ExpectProcessTerminatedError):
                    raise ShellProcessTerminatedError(cmd, e.status, o)
                else:
                    raise ShellError(cmd, o)

        if not success:
            raise ShellTimeoutError(cmd, o)

        # Remove the echoed command and the final shell prompt
        return self.remove_last_nonempty_line(self.remove_command_echo(o, cmd))

    def cmd_status_output(self, cmd, timeout=60, internal_timeout=None,
                          print_func=None):
        """
        Send a command and return its exit status and output.

        :param cmd: Command to send (must not contain newline characters)
        :param timeout: The duration (in seconds) to wait for the prompt to
                return
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :return: A tuple (status, output) where status is the exit status and
                output is the output of cmd
        :raise ShellTimeoutError: Raised if timeout expires
        :raise ShellProcessTerminatedError: Raised if the shell process
                terminates while waiting for output
        :raise ShellStatusError: Raised if the exit status cannot be obtained
        :raise ShellError: Raised if an unknown error occurs
        """
        o = self.cmd_output(cmd, timeout, internal_timeout, print_func)
        try:
            # Send the 'echo $?' (or equivalent) command to get the exit status
            s = self.cmd_output(self.status_test_command, 10, internal_timeout)
        except ShellError:
            raise ShellStatusError(cmd, o)

        # Get the first line consisting of digits only
        digit_lines = [l for l in s.splitlines() if l.strip().isdigit()]
        if digit_lines:
            return int(digit_lines[0].strip()), o
        else:
            raise ShellStatusError(cmd, o)

    def cmd_status(self, cmd, timeout=60, internal_timeout=None,
                   print_func=None):
        """
        Send a command and return its exit status.

        :param cmd: Command to send (must not contain newline characters)
        :param timeout: The duration (in seconds) to wait for the prompt to
                return
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :return: The exit status of cmd
        :raise ShellTimeoutError: Raised if timeout expires
        :raise ShellProcessTerminatedError: Raised if the shell process
                terminates while waiting for output
        :raise ShellStatusError: Raised if the exit status cannot be obtained
        :raise ShellError: Raised if an unknown error occurs
        """
        return self.cmd_status_output(cmd, timeout, internal_timeout,
                                      print_func)[0]

    def cmd(self, cmd, timeout=60, internal_timeout=None, print_func=None,
            ok_status=[0, ], ignore_all_errors=False):
        """
        Send a command and return its output. If the command's exit status is
        nonzero, raise an exception.

        :param cmd: Command to send (must not contain newline characters)
        :param timeout: The duration (in seconds) to wait for the prompt to
                return
        :param internal_timeout: The timeout to pass to read_nonblocking
        :param print_func: A function to be used to print the data being read
                (should take a string parameter)
        :param ok_status: do not raise ShellCmdError in case that exit status
                is one of ok_status. (default is [0,])
        :param ignore_all_errors: toggles whether or not an exception should be
                raised on any error.
        :return: The output of cmd, or None when an error occurred and
                ignore_all_errors was set.
        :raise ShellTimeoutError: Raised if timeout expires
        :raise ShellProcessTerminatedError: Raised if the shell process
                terminates while waiting for output
        :raise ShellStatusError: Raised if the exit status cannot be obtained
        :raise ShellError: Raised if an unknown error occurs
        :raise ShellCmdError: Raised if the exit status is nonzero
        """
        # NOTE(review): ok_status uses a mutable default argument; it is never
        # mutated here, but a tuple default would be safer.
        try:
            s, o = self.cmd_status_output(cmd, timeout, internal_timeout,
                                          print_func)
            if s not in ok_status:
                raise ShellCmdError(cmd, s, o)
            return o
        except Exception:
            if ignore_all_errors:
                pass
            else:
                raise

    def get_command_output(self, cmd, timeout=60, internal_timeout=None,
                           print_func=None):
        """
        Alias for cmd_output() for backward compatibility.
        """
        return self.cmd_output(cmd, timeout, internal_timeout, print_func)

    def get_command_status_output(self, cmd, timeout=60, internal_timeout=None,
                                  print_func=None):
        """
        Alias for cmd_status_output() for backward compatibility.
        """
        return self.cmd_status_output(cmd, timeout, internal_timeout,
                                      print_func)

    def get_command_status(self, cmd, timeout=60, internal_timeout=None,
                           print_func=None):
        """
        Alias for cmd_status() for backward compatibility.
        """
        return self.cmd_status(cmd, timeout, internal_timeout, print_func)
......@@ -65,6 +65,26 @@ def string_to_bitlist(data):
return result
def shell_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    :param command: the command string to escape.
    :return: The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be handled first so later substitutions aren't doubled.
    for special in ('\\', '$', '"', '`'):
        command = command.replace(special, '\\' + special)
    return command
def strip_console_codes(output, custom_codes=None):
"""
Remove the Linux console escape and control sequences from the console
......
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# This code was inspired in the autotest project,
#
# client/base_utils.py
# Original author: Martin J Bligh <mbligh@google.com>
# Original author: John Admanski <jadmanski@google.com>
"""
Get information from the current machine's CPU.
"""
import re
def _list_matches(lst, pattern):
"""
True if any item in list matches the specified pattern.
"""
compiled = re.compile(pattern)
for element in lst:
match = compiled.search(element)
if match:
return 1
return 0
def _get_cpu_info():
    """
    Reads /proc/cpuinfo and returns a list of file lines

    :returns: `list` of lines from /proc/cpuinfo file
    :rtype: `list`
    """
    with open('/proc/cpuinfo', 'r') as proc_file:
        lines = proc_file.readlines()
    return lines
def cpu_has_flags(flags):
    """
    Check if a list of flags are available on current CPU info

    :param flags: A `list` of cpu flags that must exists on the current CPU.
    :type flags: `list`
    :returns: True if all the flags were found or False if not
    :rtype: `bool`
    """
    # Accept a single flag as a convenience; normalize to a list.
    if not isinstance(flags, list):
        flags = [flags]
    info_lines = _get_cpu_info()
    # Every requested flag must appear somewhere in /proc/cpuinfo;
    # all() short-circuits on the first missing flag.
    return all(_list_matches(info_lines, '.*%s.*' % wanted)
               for wanted in flags)
def get_cpu_vendor_name():
    """
    Get the current cpu vendor name

    :returns: string 'intel' or 'amd' or 'power7' depending on the current
              CPU architecture, or None if no known vendor was identified.
    :rtype: `string`
    """
    vendors_map = {
        'intel': ("GenuineIntel", ),
        'amd': ("AMD", ),
        'power7': ("POWER7", )
    }

    info_lines = _get_cpu_info()
    for vendor, identifiers in vendors_map.items():
        # One matching identifier string is enough to pick the vendor.
        if any(_list_matches(info_lines, ident) for ident in identifiers):
            return vendor
    return None
def get_cpu_arch():
    """
    Work out which CPU architecture we're running on

    :returns: arch identifier string, e.g. 'power7', 'arm', 'x86_64'; falls
              back to 'i386' when nothing else matches.
    """
    cpuinfo = _get_cpu_info()
    # Ordered (pattern, arch) pairs; the first matching pattern wins,
    # mirroring the original if/elif chain.
    arch_table = (
        ('^cpu.*(RS64|POWER3|Broadband Engine)', 'power'),
        ('^cpu.*POWER4', 'power4'),
        ('^cpu.*POWER5', 'power5'),
        ('^cpu.*POWER6', 'power6'),
        ('^cpu.*POWER7', 'power7'),
        ('^cpu.*POWER8', 'power8'),
        ('^cpu.*PPC970', 'power970'),
        ('ARM', 'arm'),
        ('^flags.*:.* lm .*', 'x86_64'),
    )
    for pattern, arch in arch_table:
        if _list_matches(cpuinfo, pattern):
            return arch
    return 'i386'
......@@ -24,6 +24,8 @@ import shutil
import urllib2
from . import aurl
from . import output
from . import crypto
log = logging.getLogger('avocado.test')
......@@ -71,15 +73,48 @@ def url_download(url, filename, data=None, timeout=300):
src_file.close()
def url_download_interactive(url, output_file, title='', chunk_size=102400):
    """
    Interactively downloads a given file url to a given output file.

    :type url: string
    :param url: URL for the file to be download
    :type output_file: string
    :param output_file: file name or absolute path on which to save the file to
    :type title: string
    :param title: optional title to go along the progress bar
    :type chunk_size: integer
    :param chunk_size: amount of data to read at a time
    :raise ValueError: if the server response carries no Content-Length header.
    """
    output_dir = os.path.dirname(output_file)
    output_file = open(output_file, 'w+b')
    try:
        input_file = urllib2.urlopen(url)

        try:
            file_size = int(input_file.headers['Content-Length'])
        except KeyError:
            raise ValueError('Could not find file size in HTTP headers')

        logging.info('Downloading %s, %s to %s', os.path.basename(url),
                     output.display_data_size(file_size), output_dir)

        progress_bar = output.ProgressBar(maximum=file_size, title=title)

        # Download the file, while interactively updating the progress
        progress_bar.draw()
        while True:
            data = input_file.read(chunk_size)
            if data:
                progress_bar.append_amount(len(data))
                output_file.write(data)
            else:
                progress_bar.update_amount(file_size)
                break
    finally:
        # Close the destination file even when the download fails midway,
        # so a failed/partial download does not leak the file handle.
        output_file.close()
def _get_file(src, dst, permissions=None):
if src == dst:
return
......@@ -91,3 +126,55 @@ def get_file(src, dst, permissions=None):
if permissions:
os.chmod(dst, permissions)
return dst
def get_file(src, dst, permissions=None, hash_expected=None,
             hash_algorithm="md5", download_retries=1):
    """
    Gets a file from a source location, optionally using caching.

    If no hash_expected is provided, simply download the file. Else,
    keep trying to download the file until download_failures exceeds
    download_retries or the hashes match.

    If the hashes match, return dst. If download_failures exceeds
    download_retries, raise an EnvironmentError.

    :param src: source path or URL. May be local or a remote file.
    :param dst: destination path.
    :param permissions: (optional) set access permissions.
    :param hash_expected: Hash string that we expect the file downloaded to
            have.
    :param hash_algorithm: Algorithm used to calculate the hash string
            (md5, sha1).
    :param download_retries: Number of times we are going to retry a failed
            download.
    :raise: EnvironmentError.
    :return: destination path.
    """
    def _verify_hash(filename):
        # Hash of the file when it exists, else None (forces a download).
        if os.path.isfile(filename):
            return crypto.hash_file(filename, algorithm=hash_algorithm)
        return None

    if hash_expected is None:
        return _get_file(src, dst, permissions)

    download_failures = 0
    hash_file = _verify_hash(dst)

    while hash_file != hash_expected:
        hash_file = _verify_hash(_get_file(src, dst, permissions))
        if hash_file != hash_expected:
            # Lazy logging args instead of eager % formatting.
            log.error("It seems that dst %s is corrupted", dst)
            download_failures += 1
            if download_failures > download_retries:
                raise EnvironmentError("Failed to retrieve %s. "
                                       "Possible reasons - Network connectivity "
                                       "problems or incorrect hash_expected "
                                       "provided -> '%s'" %
                                       (src, hash_expected))
            # Only announce a retry after an actual failure (the original
            # also logged this on the final, successful iteration).
            log.error("Retrying download of src %s", src)

    return dst
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2015
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
"""
APIs to download/update git repositories from inside python scripts.
"""
import os
import logging
from . import process
from . import astring
from . import path
__all__ = ["GitRepoHelper", "get_repo"]
class GitRepoHelper(object):
"""
Helps to deal with git repos, mostly fetching content from a repo
"""
def __init__(self, uri, branch='master', lbranch='master', commit=None,
destination_dir=None, base_uri=None):
"""
Instantiates a new GitRepoHelper
:type uri: string
:param uri: git repository url
:type branch: string
:param branch: git remote branch
:type destination_dir: string
:param destination_dir: path of a dir where to save downloaded code
:type commit: string
:param commit: specific commit to download
:type lbranch: string
:param lbranch: git local branch name, if different from remote
:type base_uri: string
:param base_uri: a closer, usually local, git repository url from where
to fetch content first
"""
self.uri = uri
self.base_uri = base_uri
self.branch = branch
self.commit = commit
if destination_dir is None:
uri_basename = uri.split("/")[-1]
self.destination_dir = os.path.join("/tmp", uri_basename)
else:
self.destination_dir = destination_dir
if lbranch is None:
self.lbranch = branch
else:
self.lbranch = lbranch
self.cmd = path.find_command('git')
def init(self):
"""
Initializes a directory for receiving a verbatim copy of git repo
This creates a directory if necessary, and either resets or inits
the repo
"""
if not os.path.exists(self.destination_dir):
logging.debug('Creating directory %s for git repo %s',
self.destination_dir, self.uri)
os.makedirs(self.destination_dir)
os.chdir(self.destination_dir)
if os.path.exists('.git'):
logging.debug('Resetting previously existing git repo at %s for '
'receiving git repo %s',
self.destination_dir, self.uri)
self.git_cmd('reset --hard')
else:
logging.debug('Initializing new git repo at %s for receiving '
'git repo %s',
self.destination_dir, self.uri)
self.git_cmd('init')
def git_cmd(self, cmd, ignore_status=False):
"""
Wraps git commands.
:param cmd: Command to be executed.
:param ignore_status: Whether we should suppress error.CmdError
exceptions if the command did return exit code !=0 (True), or
not suppress them (False).
"""
os.chdir(self.destination_dir)
return process.run(r"%s %s" % (self.cmd, astring.shell_escape(cmd)),
ignore_status=ignore_status)
def fetch(self, uri):
"""
Performs a git fetch from the remote repo
"""
logging.info("Fetching git [REP '%s' BRANCH '%s'] -> %s",
uri, self.branch, self.destination_dir)
self.git_cmd("fetch -q -f -u -t %s %s:%s" %
(uri, self.branch, self.lbranch))
def get_top_commit(self):
"""
Returns the topmost commit id for the current branch.
:return: Commit id.
"""
return self.git_cmd('log --pretty=format:%H -1').stdout.strip()
def get_top_tag(self):
"""
Returns the topmost tag for the current branch.
:return: Tag.
"""
try:
return self.git_cmd('describe').stdout.strip()
except process.CmdError:
return None
def checkout(self, branch=None, commit=None):
    """
    Performs a git checkout for a given branch and start point (commit)

    :param branch: Remote branch name. Defaults to the branch set at
                   instantiation time.
    :param commit: Specific commit hash. Defaults to the commit set at
                   instantiation time.
    """
    if branch is None:
        branch = self.branch
    logging.debug('Checking out branch %s', branch)
    self.git_cmd("checkout %s" % branch)
    if commit is None:
        commit = self.commit
    if commit is not None:
        # Bug fix: use the resolved local 'commit' here. The original code
        # referenced self.commit, silently ignoring a commit passed as an
        # argument to this method.
        logging.debug('Checking out commit %s', commit)
        self.git_cmd("checkout %s" % commit)
    else:
        logging.debug('Specific commit not specified')
    top_commit = self.get_top_commit()
    top_tag = self.get_top_tag()
    if top_tag is None:
        top_tag_desc = 'no tag found'
    else:
        top_tag_desc = 'tag %s' % top_tag
    logging.info("git commit ID is %s (%s)", top_commit, top_tag_desc)
def execute(self):
    """
    Performs all steps necessary to initialize and download a git repo.

    This includes the init, fetch and checkout steps in one single
    utility method.
    """
    self.init()
    # When a base (closer, usually local) repo was given, fetch from it
    # first so the later fetch from the canonical URI transfers less data
    if self.base_uri is not None:
        self.fetch(self.base_uri)
    self.fetch(self.uri)
    self.checkout()
def get_repo(uri, branch='master', lbranch='master', commit=None,
             destination_dir=None, base_uri=None):
    """
    Utility function that retrieves a given git code repository.

    :type uri: string
    :param uri: git repository url
    :type branch: string
    :param branch: git remote branch
    :type destination_dir: string
    :param destination_dir: path of a dir where to save downloaded code
    :type commit: string
    :param commit: specific commit to download
    :type lbranch: string
    :param lbranch: git local branch name, if different from remote
    :type base_uri: string
    :param base_uri: a closer, usually local, git repository url from where to
                     fetch content first from
    :return: directory where the repository was downloaded to
    :rtype: string
    """
    repo = GitRepoHelper(uri, branch, lbranch, commit, destination_dir,
                         base_uri)
    repo.execute()
    return repo.destination_dir
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2015
# Author: Cleber Rosa <crosa@redhat.com>
"""
Basic ISO9660 file-system support.
This code does not attempt (so far) to implement code that knows about
ISO9660 internal structure. Instead, it uses commonly available support
either in userspace tools or on the Linux kernel itself (via mount).
"""
__all__ = ['iso9660', 'Iso9660IsoInfo', 'Iso9660IsoRead', 'Iso9660Mount']
import os
import logging
import tempfile
import shutil
import re
from . import process
def has_userland_tool(executable):
    """
    Returns whether the system has a given executable

    An absolute path is checked directly; a bare name is searched for in
    every directory listed in the PATH environment variable.

    :param executable: the name of the executable
    :type executable: str
    :rtype: bool
    """
    if os.path.isabs(executable):
        return os.path.exists(executable)
    return any(os.path.exists(os.path.join(directory, executable))
               for directory in os.environ['PATH'].split(':'))
def has_isoinfo():
    """
    Returns whether the system has the isoinfo executable

    Maybe more checks could be added to see if isoinfo supports the needed
    features

    :rtype: bool
    """
    # Thin wrapper: only checks for the binary's presence in PATH
    return has_userland_tool('isoinfo')
def has_isoread():
    """
    Returns whether the system has the iso-read executable

    Maybe more checks could be added to see if iso-read supports the needed
    features

    :rtype: bool
    """
    # Thin wrapper: only checks for the binary's presence in PATH
    return has_userland_tool('iso-read')
def can_mount():
    """
    Test wether the current user can perform a loop mount

    AFAIK, this means being root, having mount and iso9660 kernel support

    :rtype: bool
    """
    if os.getuid() != 0:
        logging.debug('Can not use mount: current user is not "root"')
        return False
    if not has_userland_tool('mount'):
        logging.debug('Can not use mount: missing "mount" tool')
        return False
    # Use a context manager so the /proc handle is closed deterministically
    # (the original left the file object to be reclaimed by the GC)
    with open('/proc/filesystems') as proc_filesystems:
        if 'iso9660' not in proc_filesystems.read():
            logging.debug('Can not use mount: lack of iso9660 kernel support')
            return False
    return True
class BaseIso9660(object):

    """
    Represents a ISO9660 filesystem

    This class holds common functionality and has many abstract methods
    """

    def __init__(self, path):
        """
        :param path: path to an existing and readable ISO9660 file/device
        :type path: str
        """
        self.path = path
        self._verify_path(path)

    @staticmethod
    def _verify_path(path):
        """
        Verify that the current set path is accessible

        :param path: the path for test
        :type path: str
        :raise OSError: path does not exist or path could not be read
        :rtype: None
        """
        if not os.path.exists(path):
            raise OSError('File or device path does not exist: %s' %
                          path)
        if not os.access(path, os.R_OK):
            raise OSError('File or device path could not be read: %s' %
                          path)

    def read(self, path):
        """
        Abstract method to read data from path

        :param path: path to the file
        :returns: data content from the file
        :rtype: str
        """
        raise NotImplementedError

    def copy(self, src, dst):
        """
        Simplistic version of copy that relies on read()

        :param src: source path
        :type src: str
        :param dst: destination path
        :type dst: str
        :rtype: None
        """
        content = self.read(src)
        # Use a context manager so the destination file is closed even if
        # write() raises (the original relied on an explicit close())
        with open(dst, 'w+b') as output:
            output.write(content)

    def close(self):
        """
        Cleanup and free any resources being used

        :rtype: None
        """
        pass
class Iso9660IsoInfo(BaseIso9660):

    """
    Represents a ISO9660 filesystem

    This implementation is based on the cdrkit's isoinfo tool
    """

    def __init__(self, path):
        super(Iso9660IsoInfo, self).__init__(path)
        self.joliet = False
        self.rock_ridge = False
        self.el_torito = False
        self._get_extensions(path)

    def _get_extensions(self, path):
        """
        Detect and record the ISO9660 extensions present on the image
        """
        cmd = 'isoinfo -i %s -d' % path
        output = process.system_output(cmd)
        if re.findall("\nJoliet", output):
            self.joliet = True
        if re.findall("\nRock Ridge signatures", output):
            self.rock_ridge = True
        if re.findall("\nEl Torito", output):
            self.el_torito = True

    @staticmethod
    def _normalize_path(path):
        # isoinfo expects paths anchored at the image root
        if not os.path.isabs(path):
            path = os.path.join('/', path)
        return path

    def _get_filename_in_iso(self, path):
        """
        Locate the exact stored name (case-insensitive match) inside the image
        """
        cmd = 'isoinfo -i %s -f' % self.path
        flist = process.system_output(cmd)
        fname = re.findall("(%s.*)" % self._normalize_path(path), flist, re.I)
        if fname:
            return fname[0]
        return None

    def read(self, path):
        """
        Read data from path

        :param path: path to the file inside the image
        :returns: file content, or empty string when the file is not found
        :rtype: str
        """
        cmd = ['isoinfo', '-i %s' % self.path]
        fname = self._normalize_path(path)
        if self.joliet:
            cmd.append("-J")
        elif self.rock_ridge:
            cmd.append("-R")
        else:
            # Without Joliet/Rock Ridge the stored name may differ in case
            # or carry a version suffix, so look the real name up first
            fname = self._get_filename_in_iso(path)
            if not fname:
                # logging.warn is a deprecated alias of logging.warning
                logging.warning("Could not find '%s' in iso '%s'",
                                path, self.path)
                return ""
        cmd.append("-x %s" % fname)
        result = process.run(" ".join(cmd))
        return result.stdout
class Iso9660IsoRead(BaseIso9660):

    """
    Represents a ISO9660 filesystem

    This implementation is based on the libcdio's iso-read tool
    """

    def __init__(self, path):
        super(Iso9660IsoRead, self).__init__(path)
        # Scratch area for extracted files; removed on close()
        self.temp_dir = tempfile.mkdtemp()

    def read(self, path):
        """
        Read data from path by extracting it to a temporary file first

        :param path: path to the file inside the image
        :returns: file content
        :rtype: str
        """
        temp_file = os.path.join(self.temp_dir, path)
        cmd = 'iso-read -i %s -e %s -o %s' % (self.path, path, temp_file)
        process.run(cmd)
        # Close the extracted file promptly instead of leaking the handle
        with open(temp_file) as extracted:
            return extracted.read()

    def copy(self, src, dst):
        """
        Extract src from the image directly into dst

        :param src: source path inside the image
        :type src: str
        :param dst: destination path on the host filesystem
        :type dst: str
        :rtype: None
        """
        cmd = 'iso-read -i %s -e %s -o %s' % (self.path, src, dst)
        process.run(cmd)

    def close(self):
        """
        Remove the temporary scratch directory (errors ignored)

        :rtype: None
        """
        shutil.rmtree(self.temp_dir, True)
class Iso9660Mount(BaseIso9660):

    """
    Represents a mounted ISO9660 filesystem.
    """

    def __init__(self, path):
        """
        initializes a mounted ISO9660 filesystem

        :param path: path to the ISO9660 file
        :type path: str
        """
        super(Iso9660Mount, self).__init__(path)
        self.mnt_dir = tempfile.mkdtemp()
        # Read-only loop mount; requires root and iso9660 kernel support
        process.run('mount -t iso9660 -v -o loop,ro %s %s' %
                    (path, self.mnt_dir))

    def read(self, path):
        """
        Read data from path

        :param path: path to read data
        :type path: str
        :return: data content
        :rtype: str
        """
        full_path = os.path.join(self.mnt_dir, path)
        # Close the file handle deterministically instead of leaking it
        with open(full_path) as mounted_file:
            return mounted_file.read()

    def copy(self, src, dst):
        """
        :param src: source
        :type src: str
        :param dst: destination
        :type dst: str
        :rtype: None
        """
        full_path = os.path.join(self.mnt_dir, src)
        shutil.copy(full_path, dst)

    def close(self):
        """
        Perform umount operation on the temporary dir

        :rtype: None
        """
        if os.path.ismount(self.mnt_dir):
            # Kill any process still keeping the mount point busy, then umount
            process.run('fuser -k %s' % self.mnt_dir, ignore_status=True)
            process.run('umount %s' % self.mnt_dir)
        shutil.rmtree(self.mnt_dir)
def iso9660(path):
    """
    Checks the available tools on a system and chooses class accordingly

    This is a convenience function that will pick the first available
    iso9660 capable tool, in order of preference: isoinfo, iso-read, mount.

    :param path: path to an iso9660 image file
    :type path: str
    :return: an instance of any iso9660 capable tool
    :rtype: :class:`Iso9660IsoInfo`, :class:`Iso9660IsoRead`,
            :class:`Iso9660Mount` or None
    """
    for name, check, klass in (('isoinfo', has_isoinfo, Iso9660IsoInfo),
                               ('iso-read', has_isoread, Iso9660IsoRead),
                               ('mount', can_mount, Iso9660Mount)):
        if check():
            logging.debug('Automatically chosen class for iso9660: %s', name)
            return klass(path)
    return None
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# This code was inspired in the autotest project,
#
# client/base_utils.py
# Original author: Ross Brattain <ross.b.brattain@intel.com>
"""
APIs to list and load/unload linux kernel modules.
"""
import re
import logging
from . import process
LOG = logging.getLogger('avocado.test')
def load_module(module_name):
    """
    Load a kernel module via modprobe, unless it is already loaded.

    :param module_name: name of the kernel module to load
    :type module_name: str
    :return: True if the module was loaded by this call, False if it was
             already loaded
    :rtype: bool
    """
    # Checks if a module has already been loaded
    if module_is_loaded(module_name):
        return False
    process.system('/sbin/modprobe ' + module_name)
    return True
def parse_lsmod_for_module(l_raw, module_name, escape=True):
    """
    Use a regexp to parse raw lsmod output and get module information

    :param l_raw: raw output of lsmod
    :type l_raw: str
    :param module_name: Name of module to search for
    :type module_name: str
    :param escape: Escape regexp tokens in module_name, default True
    :type escape: bool
    :return: Dictionary of module info, name, size, submodules if present;
             empty dict when the module is not found
    :rtype: dict
    """
    # re.escape the module name for safety
    if escape:
        module_search = re.escape(module_name)
    else:
        module_search = module_name
    # ^module_name spaces size spaces used optional spaces optional submodules
    # use multiline regex to scan the entire output as one string without
    # having to splitlines use named matches so we can extract the dictionary
    # with groupdict
    # Fix: the second fragment was not a raw string, making "\s"/"\S" invalid
    # escape sequences (DeprecationWarning on Python 3)
    pattern = (r"^(?P<name>%s)\s+(?P<size>\d+)\s+(?P<used>\d+)"
               r"\s*(?P<submodules>\S+)?$")
    lsmod = re.search(pattern % module_search, l_raw, re.M)
    if lsmod:
        # default to empty list if no submodules
        module_info = lsmod.groupdict([])
        # convert size to integer because it is an integer
        module_info['size'] = int(module_info['size'])
        module_info['used'] = int(module_info['used'])
        if module_info['submodules']:
            module_info['submodules'] = module_info['submodules'].split(',')
        return module_info
    else:
        # return empty dict to be consistent
        return {}
def loaded_module_info(module_name):
    """
    Get loaded module details: Size and Submodules.

    :param module_name: Name of module to search for
    :type module_name: str
    :return: Dictionary of module info, name, size, submodules if present;
             empty dict when the module is not loaded
    :rtype: dict
    """
    l_raw = process.system_output('/sbin/lsmod')
    return parse_lsmod_for_module(l_raw, module_name)
def get_submodules(module_name):
    """
    Get all submodules of the module, recursively.

    :param module_name: Name of module to search for
    :type module_name: str
    :return: List of the submodules
    :rtype: list
    """
    module_info = loaded_module_info(module_name)
    module_list = []
    try:
        submodules = module_info["submodules"]
    except KeyError:
        LOG.info("Module %s is not loaded", module_name)
    else:
        # Bug fix: copy the list before extending it. The original code
        # aliased 'submodules' and grew it with += while iterating over it,
        # so newly appended entries were re-visited and duplicated.
        module_list = list(submodules)
        for module in submodules:
            module_list += get_submodules(module)
    return module_list
def unload_module(module_name):
    """
    Removes a module. Handles dependencies. If even then it's not possible
    to remove one of the modules, it will throw an error.CmdError exception.

    :param module_name: Name of the module we want to remove.
    :type module_name: str
    :raise RuntimeError: if the module is still in use after its submodules
           were unloaded
    """
    module_info = loaded_module_info(module_name)
    try:
        submodules = module_info['submodules']
    except KeyError:
        LOG.info("Module %s is already unloaded" % module_name)
    else:
        # Unload dependent modules first, depth-first
        for module in submodules:
            unload_module(module)
        # Re-read module info: the 'used' count may have dropped after the
        # submodules were unloaded
        module_info = loaded_module_info(module_name)
        try:
            module_used = module_info['used']
        except KeyError:
            LOG.info("Module %s is already unloaded" % module_name)
            return
        if module_used != 0:
            raise RuntimeError("Module %s is still in use. "
                               "Can not unload it." % module_name)
        process.system("/sbin/modprobe -r %s" % module_name)
        LOG.info("Module %s unloaded" % module_name)
def module_is_loaded(module_name):
    """
    Check whether a given kernel module is currently loaded

    :param module_name: Name of module to search for
    :type module_name: str
    :return: True if the module is loaded
    :rtype: bool
    """
    # lsmod reports dashes as underscores, so normalize before matching
    normalized_name = module_name.replace('-', '_')
    return bool(loaded_module_info(normalized_name))
def get_loaded_modules():
    """
    Return the names of all currently loaded kernel modules.

    :return: module names, one per lsmod data line
    :rtype: list of str
    """
    # [1:] skips the "Module  Size  Used by" header line
    lsmod_output = process.system_output('/sbin/lsmod').splitlines()[1:]
    return [line.split(None, 1)[0] for line in lsmod_output]
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# This code was inspired in the autotest project,
#
# client/shared/utils.py
# Original author: Cleber Rosa <crosa@redhat.com>
#
# Copyright: Red Hat Inc. 2015
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
"""
Utility functions for user friendly display of information.
"""
import sys
def display_data_size(size):
    """
    Display data size in human readable units (SI).

    :param size: Data size, in Bytes.
    :type size: int
    :return: Human readable string with data size, using SI prefixes.
    """
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    scale = 1000.0
    index = 0
    # Keep dividing by 1000 until the value fits the unit, but never walk
    # past the largest known prefix
    while size >= scale and index < len(units) - 1:
        size /= scale
        index += 1
    return '%.2f %s' % (size, units[index])
class ProgressBar(object):

    """
    Displays interactively the progress of a given task

    Inspired/adapted from https://gist.github.com/t0xicCode/3306295
    """

    def __init__(self, minimum=0, maximum=100, width=75, title=''):
        """
        Initializes a new progress bar

        :type minimum: integer
        :param minimum: minimum (initial) value on the progress bar
        :type maximum: integer
        :param maximum: maximum (final) value on the progress bar
        :type width: integer
        :param width: number of columns, that is screen width (fixed
               docstring: this was mislabeled ':param with:')
        :type title: string
        :param title: optional label printed in front of the bar
        """
        assert maximum > minimum
        self.prog_bar = ''
        self.old_prog_bar = ''
        if title:
            # The title eats into the columns available for the bar itself
            width -= len(title)
        self.minimum = minimum
        self.maximum = maximum
        self.range = maximum - minimum
        self.width = width
        self.title = title
        self.current_amount = minimum
        self.update_amount(minimum)

    def append_amount(self, amount):
        """
        Increments the current amount value.
        """
        self.update_amount(self.current_amount + amount)

    def update_percentage(self, percentage):
        """
        Updates the progress bar to the new percentage.
        """
        self.update_amount((percentage * float(self.maximum)) / 100.0)

    def update_amount(self, amount):
        """
        Performs sanity checks and update the current amount.
        """
        # Clamp the value into the [minimum, maximum] interval
        if amount < self.minimum:
            amount = self.minimum
        if amount > self.maximum:
            amount = self.maximum
        self.current_amount = amount
        self._update_progress_bar()
        self.draw()

    def _update_progress_bar(self):
        """
        Builds the actual progress bar text.
        """
        diff = float(self.current_amount - self.minimum)
        done = (diff / float(self.range)) * 100.0
        done = int(round(done))
        # Two columns are reserved for the enclosing brackets
        all_full = self.width - 2
        hashes = (done / 100.0) * all_full
        hashes = int(round(hashes))
        if hashes == 0:
            screen_text = "[>%s]" % (' '*(all_full-1))
        elif hashes == all_full:
            screen_text = "[%s]" % ('='*all_full)
        else:
            screen_text = "[%s>%s]" % ('='*(hashes-1), ' '*(all_full-hashes))
        percent_string = str(done) + "%"
        # slice the percentage into the bar
        screen_text = ' '.join([screen_text, percent_string])
        if self.title:
            screen_text = '%s: %s' % (self.title,
                                      screen_text)
        self.prog_bar = screen_text

    def draw(self):
        """
        Prints the updated text to the screen.
        """
        # Only redraw when the rendered text actually changed
        if self.prog_bar != self.old_prog_bar:
            self.old_prog_bar = self.prog_bar
            sys.stdout.write('\r' + self.prog_bar)
            sys.stdout.flush()

    def __str__(self):
        """
        Returns the current progress bar.
        """
        return str(self.prog_bar)
......@@ -158,7 +158,7 @@ def process_in_ptree_is_defunct(ppid):
return True
for pid in pids:
cmd = "ps --no-headers -o cmd %d" % int(pid)
proc_name = system_output(cmd, ignore_status=True)
proc_name = system_output(cmd, ignore_status=True, verbose=False)
if '<defunct>' in proc_name:
defunct = True
break
......@@ -171,7 +171,7 @@ def get_children_pids(ppid):
param ppid: parent PID
return: list of PIDs of all children/threads of ppid
"""
return system_output("ps -L --ppid=%d -o lwp" % ppid).split('\n')[1:]
return system_output("ps -L --ppid=%d -o lwp" % ppid, verbose=False).split('\n')[1:]
class CmdResult(object):
......
# Copyright(c) 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# This code was inspired in the autotest project,
#
# client/shared/service.py
# Original author: Ross Brattain <ross.b.brattain@intel.com>
import os
import re
import logging
from tempfile import mktemp
from . import process
LOG = logging.getLogger('avocado.test')
_COMMAND_TABLE_DOC = """
Taken from http://fedoraproject.org/wiki/SysVinit_to_Systemd_Cheatsheet
service frobozz start
systemctl start frobozz.service
Used to start a service (not reboot persistent)
service frobozz stop
systemctl stop frobozz.service
Used to stop a service (not reboot persistent)
service frobozz restart
systemctl restart frobozz.service
Used to stop and then start a service
service frobozz reload
systemctl reload frobozz.service
When supported, reloads the config file without interrupting pending
operations.
service frobozz condrestart
systemctl condrestart frobozz.service
Restarts if the service is already running.
service frobozz status
systemctl status frobozz.service
Tells whether a service is currently running.
ls /etc/rc.d/init.d/
systemctl list-unit-files --type=service (preferred)
Used to list the services that can be started or stopped
ls /lib/systemd/system/*.service /etc/systemd/system/*.service
Used to list all the services and other units
chkconfig frobozz on
systemctl enable frobozz.service
Turn the service on, for start at next boot, or other trigger.
chkconfig frobozz off
systemctl disable frobozz.service
Turn the service off for the next reboot, or any other trigger.
chkconfig frobozz
systemctl is-enabled frobozz.service
Used to check whether a service is configured to start or not in the current
environment.
chkconfig --list
systemctl list-unit-files --type=service(preferred)
ls /etc/systemd/system/*.wants/
Print a table of services that lists which runlevels each is configured on
or off
chkconfig frobozz --list
ls /etc/systemd/system/*.wants/frobozz.service
Used to list what levels this service is configured on or off
chkconfig frobozz --add
systemctl daemon-reload
Used when you create a new service file or modify any configuration
"""
def sys_v_init_result_parser(command):
    """
    Parse results from sys_v style commands.

    command status: return true if service is running.
    command is_enabled: return true if service is enabled.
    command list: return a dict from service name to status.
    command others: return true if operate success.

    :param command: command.
    :type command: str.
    :return: different from the command.
    """
    if command == "status":
        def method(cmd_result):
            """
            Parse method for service XXX status.

            Returns True if XXX is running.
            Returns False if XXX is stopped.
            Returns None if XXX is unrecognized.
            """
            # If service is stopped, exit_status is also not zero.
            # So, we can't use exit_status to check result.
            output = cmd_result.stdout.lower()
            # Returns None if XXX is unrecognized.
            if re.search(r"unrecognized", output):
                return None
            # Returns False if XXX is stopped.
            dead_flags = [r"stopped", r"not running", r"dead"]
            for flag in dead_flags:
                if re.search(flag, output):
                    return False
            # If output does not contain a dead flag, check it with "running".
            return bool(re.search(r"running", output))
        return method
    elif command == "list":
        def method(cmd_result):
            """
            Parse method for service XXX list.

            Return dict from service name to status.

            >>> {"sshd": {0: 'off', 1: 'off', 2: 'off', 3: 'off', 4: 'off',
            >>>           5: 'off', 6: 'off'},
            >>>  "vsftpd": {0: 'off', 1: 'off', 2: 'off', 3: 'off', 4: 'off',
            >>>             5: 'off', 6: 'off'},
            >>>  "xinetd": {'discard-dgram:': 'off',
            >>>             'rsync:': 'off'...'chargen-stream:': 'off'},
            >>>  ...
            >>>  }
            """
            if cmd_result.exit_status:
                raise process.CmdError(cmd_result.command, cmd_result)
            # The final dict to return.
            _service2statusOnTarget_dict = {}
            # Dict to store status on every target for each service.
            _status_on_target = {}
            # Dict to store the status for service based on xinetd.
            _service2statusOnXinet_dict = {}
            lines = cmd_result.stdout.strip().splitlines()
            for line in lines:
                sublines = line.strip().split()
                if len(sublines) == 8:
                    # Service and status on each target
                    # (e.g. "sshd 0:off 1:off ... 6:off").
                    service_name = sublines[0]
                    # Store the status of each target in _status_on_target.
                    for target in range(7):
                        status = sublines[target + 1].split(":")[-1]
                        _status_on_target[target] = status
                    _service2statusOnTarget_dict[service_name] = (
                        _status_on_target.copy())
                elif len(sublines) == 2:
                    # Service based on xinetd (e.g. "rsync:  off").
                    service_name = sublines[0].strip(":")
                    status = sublines[-1]
                    _service2statusOnXinet_dict[service_name] = status
                else:
                    # Header or some lines useless.
                    continue
            # Add xinetd based service in the main dict.
            _service2statusOnTarget_dict["xinetd"] = _service2statusOnXinet_dict
            return _service2statusOnTarget_dict
        return method
    else:
        # Every other verb just maps to success/failure of the command
        return _ServiceResultParser.default_method
def systemd_result_parser(command):
    """
    Parse results from systemd style commands.

    command status: return true if service is running.
    command is_enabled: return true if service is enabled.
    command list: return a dict from service name to status.
    command others: return true if operate success.

    :param command: command.
    :type command: str.
    :return: different from the command.
    """
    if command == "status":
        def method(cmd_result):
            """
            Parse method for systemctl status XXX.service.

            Returns True if XXX.service is running.
            Returns False if XXX.service is stopped.
            Returns None if XXX.service is not loaded.
            """
            # If service is stopped, exit_status is also not zero.
            # So, we can't use exit_status to check result.
            output = cmd_result.stdout
            # Returns None if XXX is not loaded.
            if not re.search(r"Loaded: loaded", output):
                return None
            # Check it with Active status.
            return output.count("Active: active") > 0
        return method
    elif command == "list":
        def method(cmd_result):
            """
            Parse method for systemctl list XXX.service.

            Return a dict from service name to status.

            e.g:
                {"sshd": "enabled",
                 "vsftpd": "disabled",
                 "systemd-sysctl": "static",
                 ...
                }
            """
            if cmd_result.exit_status:
                raise process.CmdError(cmd_result.command, cmd_result)
            # Dict to store service name to status.
            _service2status_dict = {}
            lines = cmd_result.stdout.strip().splitlines()
            for line in lines:
                sublines = line.strip().split()
                if ((not len(sublines) == 2) or
                        (not sublines[0].endswith("service"))):
                    # Some lines useless.
                    continue
                # Strip the trailing ".service" suffix from the unit name
                service_name = sublines[0].rstrip(".service")
                status = sublines[-1]
                _service2status_dict[service_name] = status
            return _service2status_dict
        return method
    else:
        # Every other verb just maps to success/failure of the command
        return _ServiceResultParser.default_method
def sys_v_init_command_generator(command):
    """
    Generate lists of command arguments for sys_v style inits.

    :param command: start,stop,restart, etc.
    :type command: str
    :return: list of commands to pass to process.run or similar function
    :rtype: list
    """
    # 'list' and 'set_target' do not follow the "<tool> <service> <verb>"
    # shape, so they get dedicated generators
    if command == 'list':
        # noinspection PyUnusedLocal
        def list_command(service_name):
            return ["chkconfig", "--list"]
        return list_command
    if command == "set_target":
        def set_target_command(target):
            return ["telinit", convert_systemd_target_to_runlevel(target)]
        return set_target_command
    # Enable/disable style verbs are handled by chkconfig instead of service
    chkconfig_verbs = {"is_enabled": "",
                       "enable": "on",
                       "disable": "off"}
    if command in chkconfig_verbs:
        command_name, command = "chkconfig", chkconfig_verbs[command]
    else:
        command_name = "service"

    def method(service_name):
        return [command_name, service_name, command]
    return method
def systemd_command_generator(command):
    """
    Generate list of command line argument strings for systemctl.

    One argument per string for compatibility Popen

    WARNING: If systemctl detects that it is running on a tty it will use
    color, pipe to $PAGER, change column sizes and not truncate unit names.
    Use --no-pager to suppress pager output, or set PAGER=cat in the
    environment. You may need to take other steps to suppress color output.
    See https://bugzilla.redhat.com/show_bug.cgi?id=713567

    :param command: start,stop,restart, etc.
    :type command: str
    :return: List of command and arguments to pass to process.run or similar
            functions
    :rtype: list
    """
    command_name = "systemctl"
    if command == "list":
        # noinspection PyUnusedLocal
        def list_command(service_name):
            # systemctl pipes to `less` or $PAGER by default. Workaround this
            # add '--full' to avoid systemctl truncates service names.
            return [command_name, "list-unit-files",
                    "--type=service", "--no-pager", "--full"]
        return list_command
    if command == "set_target":
        def set_target_command(target):
            return [command_name, "isolate", target]
        return set_target_command
    # systemctl spells this verb with a dash
    if command == "is_enabled":
        command = "is-enabled"

    def method(service_name):
        return [command_name, command, "%s.service" % service_name]
    return method
# Service-control verbs shared by the SysV init and systemd command/result
# generators; one method per verb is created on the parser/generator objects
COMMANDS = (
    "start",
    "stop",
    "reload",
    "restart",
    "condrestart",
    "status",
    "enable",
    "disable",
    "is_enabled",
    "list",
    "set_target",
)
class _ServiceResultParser(object):

    """
    A class that contains staticmethods to parse the result of service command.
    """

    def __init__(self, result_parser, command_list=COMMANDS):
        """
        Create staticmethods for each command in command_list using setattr.

        :param result_parser: function that generates functions that parse the
                result of command.
        :type result_parser: function
        :param command_list: list of all the commands, e.g. start, stop,
                restart, etc.
        :type command_list: list
        """
        self.commands = command_list
        for command in self.commands:
            # One parser method per verb, e.g. self.start, self.status
            setattr(self, command, result_parser(command))

    @staticmethod
    def default_method(cmd_result):
        """
        Default method to parse result from command which is not 'list'/'status'

        Returns True if command was executed successfully.
        """
        if cmd_result.exit_status:
            LOG.debug(cmd_result)
            return False
        else:
            return True
class _ServiceCommandGenerator(object):

    """
    Generate command lists for starting/stopping services.
    """

    def __init__(self, command_generator, command_list=COMMANDS):
        """
        Create staticmethods for each command in command_list.

        :param command_generator: function that generates functions that
                generate lists of command strings
        :type command_generator: function
        :param command_list: list of all the commands, e.g. start, stop,
                restart, etc.
        :type command_list: list
        """
        self.commands = command_list
        for command in self.commands:
            # One command-builder per verb, e.g. self.start("sshd")
            setattr(self, command, command_generator(command))
def _get_name_of_init(run=process.run):
    """
    Internal function to determine what executable is PID 1

    It does that by checking /proc/1/exe.

    :param run: command runner used to execute readlink, default process.run
    :type run: function
    :return: executable name for PID 1, aka init
    :rtype: str
    """
    # /proc/1/comm was added in 2.6.33 and is not in RHEL6.x, so use cmdline
    # Non-root can read cmdline
    # return os.path.basename(open("/proc/1/cmdline").read().split(chr(0))[0])
    # readlink /proc/1/exe requires root
    # inspired by openvswitch.py:ServiceManagerInterface.get_version()
    output = run("readlink /proc/1/exe").stdout.strip()
    return os.path.basename(output)
def get_name_of_init(run=process.run):
    """
    Determine what executable is PID 1, aka init by checking /proc/1/exe

    This init detection will only run once and cache the return value.

    :param run: command runner forwarded to the internal helper
    :type run: function
    :return: executable name for PID 1, aka init
    :rtype: str
    """
    # _init_name is explicitly undefined so that we get the NameError on
    # first access
    # pylint: disable=W0601
    global _init_name
    try:
        return _init_name
    except (NameError, AttributeError):
        # First call: detect once and memoize in a module-level global
        _init_name = _get_name_of_init(run)
        return _init_name
class _SpecificServiceManager(object):

    def __init__(self, service_name, service_command_generator,
                 service_result_parser, run=process.run):
        """
        Create staticmethods that call process.run with the given service_name

        >>> my_generator = auto_create_specific_service_command_generator
        >>> lldpad = SpecificServiceManager("lldpad", my_generator())
        >>> lldpad.start()
        >>> lldpad.stop()

        :param service_name: init service name or systemd unit name
        :type service_name: str
        :param service_command_generator: a sys_v_init or systemd command
                generator
        :type service_command_generator: _ServiceCommandGenerator
        :param service_result_parser: a sys_v_init or systemd result parser
        :type service_result_parser: _ServiceResultParser
        :param run: function that executes the commands and return CmdResult
                object, default process.run
        :type run: function
        """
        for cmd in service_command_generator.commands:
            run_func = run
            parse_func = getattr(service_result_parser, cmd)
            command = getattr(service_command_generator, cmd)
            # Bind one runner per verb with the service name pre-applied,
            # e.g. self.start(), self.stop()
            setattr(self, cmd,
                    self.generate_run_function(run_func=run_func,
                                               parse_func=parse_func,
                                               command=command,
                                               service_name=service_name))

    @staticmethod
    def generate_run_function(run_func, parse_func, command, service_name):
        """
        Generate the wrapped call to process.run for the given service_name.

        :param run_func: function to execute command and return CmdResult
                object.
        :type run_func: function
        :param parse_func: function to parse the result from run.
        :type parse_func: function
        :param command: partial function that generates the command list
        :type command: function
        :param service_name: init service name or systemd unit name
        :type service_name: str
        :return: wrapped process.run function.
        :rtype: function
        """
        def run(**kwargs):
            """
            Wrapped process.run invocation that will start/stop/etc a service.

            :param kwargs: extra arguments to process.run, .e.g. timeout.
                    But not for ignore_status.
                    We need a CmdResult to parse and raise an
                    exception.TestError if command failed.
                    We will not let the CmdError out.
            :return: result of parse_func.
            """
            # If run_func is process.run by default, we need to set
            # ignore_status = True. Otherwise, skip this setting.
            if run_func is process.run:
                LOG.debug("Setting ignore_status to True.")
                kwargs["ignore_status"] = True
            result = run_func(" ".join(command(service_name)), **kwargs)
            return parse_func(result)
        return run
class _GenericServiceManager(object):

    """
    Base class for SysVInitServiceManager and SystemdServiceManager.
    """

    def __init__(self, service_command_generator, service_result_parser,
                 run=process.run):
        """
        Create staticmethods for each service command, e.g. start, stop

        These staticmethods take as an argument the service to be started or
        stopped.

        >>> my_generator = auto_create_specific_service_command_generator
        >>> systemd = SpecificServiceManager(my_generator())
        >>> systemd.start("lldpad")
        >>> systemd.stop("lldpad")

        :param service_command_generator: a sys_v_init or systemd command
                generator
        :type service_command_generator: _ServiceCommandGenerator
        :param service_result_parser: a sys_v_init or systemd result parser
        :type service_result_parser: _ServiceResultParser
        :param run: function to call the run the commands, default process.run
        :type run: function
        """
        for cmd in service_command_generator.commands:
            parse_func = getattr(service_result_parser, cmd)
            command = getattr(service_command_generator, cmd)
            # Bind one runner per verb that takes the service name as its
            # argument, e.g. self.start("sshd")
            setattr(self, cmd,
                    self.generate_run_function(run_func=run,
                                               parse_func=parse_func,
                                               command=command))

    @staticmethod
    def generate_run_function(run_func, parse_func, command):
        """
        Generate the wrapped call to process.run for the service command.

        :param run_func: process.run
        :type run_func: function
        :param parse_func: function to parse the result from run.
        :type parse_func: function
        :param command: partial function that generates the command list
        :type command: function
        :return: wrapped process.run function.
        :rtype: function
        """
        def run(service="", **kwargs):
            """
            Wrapped process.run invocation that will start/stop/etc. a service.

            :param service: service name, e.g. crond, dbus, etc.
            :param kwargs: extra arguments to process.run, .e.g. timeout.
                    But not for ignore_status.
                    We need a CmdResult to parse and raise a exception.TestError
                    if command failed. We will not let the CmdError out.
            :return: result of parse_func.
            """
            # If run_func is process.run by default, we need to set
            # ignore_status = True. Otherwise, skip this setting.
            if run_func is process.run:
                LOG.debug("Setting ignore_status to True.")
                kwargs["ignore_status"] = True
            result = run_func(" ".join(command(service)), **kwargs)
            return parse_func(result)
        return run
class _SysVInitServiceManager(_GenericServiceManager):
    """
    Concrete service manager for SysV init based systems.
    """
    def __init__(self, service_command_generator, service_result_parser,
                 run=process.run):
        """
        Build the GenericServiceManager wired up for SysV services.

        :param service_command_generator: generator of SysV service commands
        :type service_command_generator: _ServiceCommandGenerator
        :param service_result_parser: parser for SysV command results
        :type service_result_parser: _ServiceResultParser
        :param run: function to call to run the commands, default process.run
        :type run: function
        """
        super(_SysVInitServiceManager, self).__init__(
            service_command_generator, service_result_parser, run)
def convert_sysv_runlevel(level):
    """
    Convert a SysV runlevel to the equivalent systemd target.

    :param level: sys_v runlevel
    :type level: str or int
    :return: systemd target
    :rtype: str
    :raise ValueError: when runlevel is unknown
    """
    # Several runlevels collapse onto the same target, so a lookup table
    # is the clearest representation of the mapping.
    level_to_target = {'0': "poweroff.target",
                       '1': "rescue.target",
                       's': "rescue.target",
                       'single': "rescue.target",
                       '2': "multi-user.target",
                       '3': "multi-user.target",
                       '4': "multi-user.target",
                       '5': "graphical.target",
                       '6': "reboot.target"}
    try:
        return level_to_target[str(level)]
    except KeyError:
        raise ValueError("unknown runlevel %s" % level)
def convert_systemd_target_to_runlevel(target):
    """
    Convert a systemd target to the equivalent SysV runlevel.

    :param target: systemd target
    :type target: str
    :return: sys_v runlevel
    :rtype: str
    :raise ValueError: when systemd target is unknown
    """
    target_to_level = {"poweroff.target": '0',
                       "rescue.target": 's',
                       "multi-user.target": '3',
                       "graphical.target": '5',
                       "reboot.target": '6'}
    try:
        return target_to_level[target]
    except KeyError:
        raise ValueError("unknown target %s" % target)
class _SystemdServiceManager(_GenericServiceManager):
    """
    Concrete service manager for systemd based systems.
    """
    def __init__(self, service_command_generator, service_result_parser,
                 run=process.run):
        """
        Create the GenericServiceManager for systemd services.

        :param service_command_generator: generator of systemctl commands
        :type service_command_generator: _ServiceCommandGenerator
        :param service_result_parser: parser for systemctl command results
        :type service_result_parser: _ServiceResultParser
        :param run: function to call to run the commands, default process.run
        :type run: function
        """
        super(_SystemdServiceManager, self).__init__(service_command_generator,
                                                     service_result_parser, run)
    @staticmethod
    def change_default_runlevel(runlevel='multi-user.target'):
        # Atomic symlinking: create the symlink under a temporary name in
        # the same directory, then os.rename() it into place so readers
        # never observe a missing default.target.
        """
        Set the default systemd target.
        Create the symlink in a temp directory and then use
        atomic rename to move the symlink into place.

        :param runlevel: default systemd target
        :type runlevel: str
        """
        tmp_symlink = mktemp(dir="/etc/systemd/system")
        os.symlink("/usr/lib/systemd/system/%s" % runlevel, tmp_symlink)
        os.rename(tmp_symlink, "/etc/systemd/system/default.target")
# Dispatch tables keyed by the init program name returned by
# get_name_of_init(): command generator, result parser and concrete
# service manager class for each supported init flavor.
_command_generators = {"init": sys_v_init_command_generator,
                       "systemd": systemd_command_generator}
_result_parsers = {"init": sys_v_init_result_parser,
                   "systemd": systemd_result_parser}
_service_managers = {"init": _SysVInitServiceManager,
                     "systemd": _SystemdServiceManager}
def _get_service_result_parser(run=process.run):
    """
    Lazy initializer for the ServiceResultParser using the auto-detected
    init command.

    :param run: function used to execute commands, default process.run
    :type run: function
    :return: ServiceResultParser for the current init command.
    :rtype: _ServiceResultParser
    """
    # _service_result_parser is intentionally left undefined at module
    # level: the first access raises NameError and triggers creation;
    # subsequent calls return the cached singleton.
    # pylint: disable=W0601
    global _service_result_parser
    try:
        return _service_result_parser
    except NameError:
        result_parser = _result_parsers[get_name_of_init(run)]
        _service_result_parser = _ServiceResultParser(result_parser)
        return _service_result_parser
def _get_service_command_generator(run=process.run):
    """
    Lazy initializer for ServiceCommandGenerator using the auto-detect init
    command.

    :param run: function used to execute commands, default process.run
    :type run: function
    :return: ServiceCommandGenerator for the current init command.
    :rtype: _ServiceCommandGenerator
    """
    # _service_command_generator is explicitly undefined at module level so
    # that we get the NameError on first access; subsequent calls return
    # the cached singleton.
    # pylint: disable=W0601
    global _service_command_generator
    try:
        return _service_command_generator
    except NameError:
        command_generator = _command_generators[get_name_of_init(run)]
        _service_command_generator = _ServiceCommandGenerator(
            command_generator)
        return _service_command_generator
def service_manager(run=process.run):
    """
    Detect which init program is being used (init or systemd) and return a
    lazily-created singleton with methods to start/stop services.

    # Get the system service manager
    >> service_manager = ServiceManager()

    # Starting service/unit "sshd"
    >> service_manager.start("sshd")

    # Getting a list of available units
    >> units = service_manager.list()

    # Disabling and stopping a list of services
    >> services_to_disable = ['ntpd', 'httpd']
    >> for s in services_to_disable:
    >>     service_manager.disable(s)
    >>     service_manager.stop(s)

    :param run: function used to execute the commands, default process.run
    :type run: function
    :return: SysVInitServiceManager or SystemdServiceManager
    :rtype: _GenericServiceManager
    """
    # _service_manager is explicitly undefined at module level so that we
    # get the NameError on first access and build the singleton lazily.
    # pylint: disable=W0601
    global _service_manager
    try:
        return _service_manager
    except NameError:
        # Detect the init system only when the singleton does not exist
        # yet: get_name_of_init() runs an external command, and the
        # original code paid that cost on every (already-cached) call.
        internal_service_manager = _service_managers[get_name_of_init(run)]
        _service_manager = internal_service_manager(
            _get_service_command_generator(run),
            _get_service_result_parser(run))
        return _service_manager
ServiceManager = service_manager
def _auto_create_specific_service_result_parser(run=process.run):
    """
    Create a ServiceResultParser tailored to the auto-detected init command.

    :param run: function used to execute commands, default process.run
    :type run: function
    :return: A ServiceResultParser for the auto-detected init command.
    :rtype: _ServiceResultParser
    """
    parser = _result_parsers[get_name_of_init(run)]
    # "list" and "set_target" do not apply to one specific service
    supported = [c for c in COMMANDS if c not in ("list", "set_target")]
    return _ServiceResultParser(parser, supported)
def _auto_create_specific_service_command_generator(run=process.run):
    """
    Create a ServiceCommandGenerator tailored to the auto-detected init
    command.

    >>> my_generator = auto_create_specific_service_command_generator
    >>> lldpad = SpecificServiceManager("lldpad", my_generator())
    >>> lldpad.start()
    >>> lldpad.stop()

    :param run: function used to execute commands, default process.run
    :type run: function
    :return: A ServiceCommandGenerator for the auto-detected init command.
    :rtype: _ServiceCommandGenerator
    """
    generator = _command_generators[get_name_of_init(run)]
    # "list" and "set_target" do not apply to one specific service
    supported = [c for c in COMMANDS if c not in ("list", "set_target")]
    return _ServiceCommandGenerator(generator, supported)
def specific_service_manager(service_name, run=process.run):
    """
    Build a manager whose methods operate on one specific service.

    # Get the specific service manager for sshd
    >>> sshd = SpecificServiceManager("sshd")
    >>> sshd.start()
    >>> sshd.stop()
    >>> sshd.reload()
    >>> sshd.restart()
    >>> sshd.condrestart()
    >>> sshd.status()
    >>> sshd.enable()
    >>> sshd.disable()
    >>> sshd.is_enabled()

    :param service_name: systemd unit or init.d service to manage
    :type service_name: str
    :param run: function used to execute the commands, default process.run
    :type run: function
    :return: SpecificServiceManager that has start/stop methods
    :rtype: _SpecificServiceManager
    """
    command_generator = _auto_create_specific_service_command_generator(run)
    result_parser = _get_service_result_parser(run)
    return _SpecificServiceManager(service_name, command_generator,
                                   result_parser, run)
SpecificServiceManager = specific_service_manager
......@@ -7,3 +7,5 @@ Sphinx>=1.3b1
flexmock>=0.9.7
# inspektor (static and style checks)
inspektor>=0.1.12
# mock (some unittests use it)
mock>=1.0.0
......@@ -12,3 +12,4 @@ Pillow==2.2.1
snakefood==1.4
networkx==1.9.1
pygraphviz==1.3rc2
mock==1.2.0
import unittest
import os
import sys
# Simple magic for using scripts within a source tree: climb up from this
# file's directory and, if an 'avocado' package directory lives there, make
# it importable by appending that directory to sys.path.
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
basedir = os.path.dirname(basedir)
if os.path.isdir(os.path.join(basedir, 'avocado')):
    sys.path.append(basedir)
from avocado.utils import linux_modules
class TestLsmod(unittest.TestCase):
    """
    Checks for avocado.utils.linux_modules.parse_lsmod_for_module().
    """
    # Verbatim sample of `lsmod` output used as the parsing fixture.
    LSMOD_OUT = """\
Module                  Size  Used by
ccm                    17773  2
ip6t_rpfilter          12546  1
ip6t_REJECT            12939  2
xt_conntrack           12760  9
ebtable_nat            12807  0
ebtable_broute         12731  0
bridge                110862  1 ebtable_broute
stp                    12868  1 bridge
llc                    13941  2 stp,bridge
ebtable_filter         12827  0
ebtables               30758  3 ebtable_broute,ebtable_nat,ebtable_filter
ip6table_nat           13015  1
nf_conntrack_ipv6      18738  6
nf_defrag_ipv6         34712  1 nf_conntrack_ipv6
nf_nat_ipv6            13213  1 ip6table_nat
ip6table_mangle        12700  1
ip6table_security      12710  1
ip6table_raw           12683  1
ip6table_filter        12815  1
"""
    def test_parse_lsmod(self):
        # Module with multiple submodules ("Used by" column).
        lsmod_info = linux_modules.parse_lsmod_for_module(
            self.LSMOD_OUT, "ebtables")
        submodules = ['ebtable_broute', 'ebtable_nat', 'ebtable_filter']
        assert lsmod_info['submodules'] == submodules
        assert lsmod_info == {
            'name': "ebtables",
            'size': 30758,
            'used': 3,
            'submodules': submodules
        }
    # NOTE(review): this is the only test defined as a staticmethod — the
    # sibling tests are instance methods; consider making it consistent.
    @staticmethod
    def test_parse_lsmod_is_empty():
        # A module absent from the output yields an empty dict.
        lsmod_info = linux_modules.parse_lsmod_for_module("", "ebtables")
        assert lsmod_info == {}
    def test_parse_lsmod_no_submodules(self):
        # Module with an empty "Used by" list.
        lsmod_info = linux_modules.parse_lsmod_for_module(self.LSMOD_OUT, "ccm")
        submodules = []
        assert lsmod_info['submodules'] == submodules
        assert lsmod_info == {
            'name': "ccm",
            'size': 17773,
            'used': 2,
            'submodules': submodules
        }
    def test_parse_lsmod_single_submodules(self):
        # Module with exactly one submodule.
        lsmod_info = linux_modules.parse_lsmod_for_module(
            self.LSMOD_OUT, "bridge")
        submodules = ['ebtable_broute']
        assert lsmod_info['submodules'] == submodules
        assert lsmod_info == {
            'name': "bridge",
            'size': 110862,
            'used': 1,
            'submodules': submodules
        }
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
import unittest
import os
import sys
# Simple magic for using scripts within a source tree: climb up from this
# file's directory and, if an 'avocado' package directory lives there, make
# it importable by appending that directory to sys.path.
basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
basedir = os.path.dirname(basedir)
if os.path.isdir(os.path.join(basedir, 'avocado')):
    sys.path.append(basedir)
from avocado.utils import output
class UtilsOutputTest(unittest.TestCase):
    """
    Checks for avocado.utils.output.display_data_size().
    """
    def testDisplayDataSizeFactor1024(self):
        # display_data_size renders with decimal (factor 1000) prefixes,
        # so powers of 1024 come out slightly above round values.
        expected = [(103, '103.00 B'),
                    (1024 ** 1, '1.02 KB'),
                    (1024 ** 2, '1.05 MB'),
                    (1024 ** 3, '1.07 GB'),
                    (1024 ** 4, '1.10 TB'),
                    (1024 ** 5, '1.13 PB'),
                    (1024 ** 6, '1152.92 PB')]
        for size, rendered in expected:
            self.assertEqual(output.display_data_size(size), rendered)
    def testDisplayDataSizeFactor1000(self):
        # Powers of 1000 map exactly onto the decimal prefixes; PB is the
        # largest supported unit, so 1000**6 stays expressed in PB.
        for power, unit in enumerate(['KB', 'MB', 'GB', 'TB', 'PB'], 1):
            self.assertEqual(output.display_data_size(1000 ** power),
                             '1.00 %s' % unit)
        self.assertEqual(output.display_data_size(1000 ** 6), '1000.00 PB')
if __name__ == '__main__':
    unittest.main()
# Copyright(c) 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
import unittest
from mock import MagicMock, patch
from avocado.utils import service
class TestSystemd(unittest.TestCase):
    """
    Checks the command lines produced by the systemd command generator.
    """
    def setUp(self):
        self.service_name = "fake_service"
        generator_func = service._command_generators["systemd"]
        self.service_command_generator = service._ServiceCommandGenerator(
            generator_func)
    def test_all_commands(self):
        # "list" and "set_target" have their own shapes; check the rest.
        for cmd in self.service_command_generator.commands:
            if cmd in ("list", "set_target"):
                continue
            generated = getattr(self.service_command_generator,
                                cmd)(self.service_name)
            expected_cmd = "is-enabled" if cmd == "is_enabled" else cmd
            self.assertEqual(generated,
                             ["systemctl", expected_cmd,
                              "%s.service" % self.service_name])
    def test_set_target(self):
        generated = self.service_command_generator.set_target(
            "multi-user.target")
        self.assertEqual(generated,
                         ["systemctl", "isolate", "multi-user.target"])
class TestSysVInit(unittest.TestCase):
    """
    Checks the command lines produced by the SysV init command generator.
    """
    def setUp(self):
        self.service_name = "fake_service"
        init_name = "init"
        command_generator = service._command_generators[init_name]
        self.service_command_generator = service._ServiceCommandGenerator(
            command_generator)
    def test_all_commands(self):
        for cmd in (c for c in self.service_command_generator.commands
                    if c not in ["list", "set_target"]):
            ret = getattr(
                self.service_command_generator, cmd)(self.service_name)
            # Reset the expected command name on every iteration: the
            # original set it once before the loop, so "chkconfig" leaked
            # from the enable/disable/is_enabled iterations into any
            # command checked after them.
            command_name = "service"
            if cmd == "is_enabled":
                command_name = "chkconfig"
                cmd = ""
            elif cmd == 'enable':
                command_name = "chkconfig"
                cmd = "on"
            elif cmd == 'disable':
                command_name = "chkconfig"
                cmd = "off"
            assert ret == [command_name, self.service_name, cmd]
    def test_set_target(self):
        # multi-user.target corresponds to runlevel 3 under SysV init.
        ret = getattr(
            self.service_command_generator, "set_target")("multi-user.target")
        assert ret == ["telinit", "3"]
class TestSpecificServiceManager(unittest.TestCase):
    """
    Checks _SpecificServiceManager against a mocked run function.
    """
    def setUp(self):
        self.run_mock = MagicMock()
        self.init_name = "init"
        get_name_of_init_mock = MagicMock(return_value="init")

        # The factories auto-detect the init system by running an external
        # command; patch the detection so no process is spawned.
        @patch.object(service, "get_name_of_init", get_name_of_init_mock)
        def patch_service_command_generator():
            return service._auto_create_specific_service_command_generator()

        @patch.object(service, "get_name_of_init", get_name_of_init_mock)
        def patch_service_result_parser():
            return service._auto_create_specific_service_result_parser()
        service_command_generator = patch_service_command_generator()
        service_result_parser = patch_service_result_parser()
        self.service_manager = service._SpecificServiceManager(
            "boot.lldpad", service_command_generator,
            service_result_parser, self.run_mock)
    def test_start(self):
        # Local renamed from "service" to avoid shadowing the imported
        # `service` module.
        service_name = "lldpad"
        self.service_manager.start()
        assert self.run_mock.call_args[0][
            0] == "service boot.%s start" % service_name
    def test_stop_with_args(self):
        service_name = "lldpad"
        self.service_manager.stop(ignore_status=True)
        assert self.run_mock.call_args[0][
            0] == "service boot.%s stop" % service_name
    def test_list_is_not_present_in_SpecifcServiceManager(self):
        assert not hasattr(self.service_manager, "list")
    def test_set_target_is_not_present_in_SpecifcServiceManager(self):
        assert not hasattr(self.service_manager, "set_target")
class TestServiceManager(unittest.TestCase):
    """
    Shared helper base for the concrete service manager test cases.
    """
    @staticmethod
    def get_service_manager_from_init_and_run(init_name, run_mock):
        # Look up the generator/parser/manager trio for this init flavor
        # and assemble a manager that uses the mocked run function.
        generator_func = service._command_generators[init_name]
        parser_func = service._result_parsers[init_name]
        manager_class = service._service_managers[init_name]
        generator = service._ServiceCommandGenerator(generator_func)
        parser = service._ServiceResultParser(parser_func)
        return manager_class(generator, parser, run_mock)
class TestSystemdServiceManager(TestServiceManager):
    """
    Checks the systemd service manager against a mocked run function.
    """
    def setUp(self):
        self.run_mock = MagicMock()
        self.init_name = "systemd"
        self.service_manager = super(TestSystemdServiceManager,
                                     self).get_service_manager_from_init_and_run(self.init_name,
                                                                                 self.run_mock)
    def test_start(self):
        # NOTE(review): the local name shadows the imported `service`
        # module for the rest of this method.
        service = "lldpad"
        self.service_manager.start(service)
        assert self.run_mock.call_args[0][
            0] == "systemctl start %s.service" % service
    def test_list(self):
        # Fake `systemctl list-unit-files` output covering the three unit
        # states the parser must recognize: enabled, disabled, static.
        list_result_mock = MagicMock(exit_status=0, stdout="sshd.service enabled\n"
                                     "vsftpd.service disabled\n"
                                     "systemd-sysctl.service static\n")
        run_mock = MagicMock(return_value=list_result_mock)
        service_manager = super(TestSystemdServiceManager,
                                self).get_service_manager_from_init_and_run(self.init_name,
                                                                            run_mock)
        list_result = service_manager.list(ignore_status=False)
        assert run_mock.call_args[0][
            0] == "systemctl list-unit-files --type=service --no-pager --full"
        assert list_result == {'sshd': "enabled",
                               'vsftpd': "disabled",
                               'systemd-sysctl': "static"}
    def test_set_default_runlevel(self):
        runlevel = service.convert_sysv_runlevel(3)
        mktemp_mock = MagicMock(return_value="temp_filename")
        symlink_mock = MagicMock()
        rename_mock = MagicMock()

        # Patch the filesystem calls so nothing is written, then verify
        # the symlink-then-atomic-rename sequence inside the patched scope.
        @patch.object(service, "mktemp", mktemp_mock)
        @patch("os.symlink", symlink_mock)
        @patch("os.rename", rename_mock)
        def _():
            self.service_manager.change_default_runlevel(runlevel)
            assert mktemp_mock.called
            assert symlink_mock.call_args[0][
                0] == "/usr/lib/systemd/system/multi-user.target"
            assert rename_mock.call_args[0][
                1] == "/etc/systemd/system/default.target"
        _()
    def test_unknown_runlevel(self):
        self.assertRaises(ValueError,
                          service.convert_systemd_target_to_runlevel, "unknown")
    def test_runlevels(self):
        assert service.convert_sysv_runlevel(0) == "poweroff.target"
        assert service.convert_sysv_runlevel(1) == "rescue.target"
        assert service.convert_sysv_runlevel(2) == "multi-user.target"
        assert service.convert_sysv_runlevel(5) == "graphical.target"
        assert service.convert_sysv_runlevel(6) == "reboot.target"
class TestSysVInitServiceManager(TestServiceManager):
    """
    Checks the SysV init service manager against a mocked run function.
    """
    def setUp(self):
        self.run_mock = MagicMock()
        self.init_name = "init"
        self.service_manager = super(TestSysVInitServiceManager,
                                     self).get_service_manager_from_init_and_run(self.init_name,
                                                                                 self.run_mock)
    def test_list(self):
        # Fake `chkconfig --list` output, including the xinetd section
        # which uses a different "name: state" format.
        list_result_mock = MagicMock(exit_status=0,
                                     stdout="sshd 0:off 1:off 2:off 3:off 4:off 5:off 6:off\n"
                                     "vsftpd 0:off 1:off 2:off 3:off 4:off 5:on 6:off\n"
                                     "xinetd based services:\n"
                                     " amanda: off\n"
                                     " chargen-dgram: on\n")
        run_mock = MagicMock(return_value=list_result_mock)
        service_manager = super(TestSysVInitServiceManager,
                                self).get_service_manager_from_init_and_run(self.init_name,
                                                                            run_mock)
        list_result = service_manager.list(ignore_status=False)
        assert run_mock.call_args[0][
            0] == "chkconfig --list"
        assert list_result == {'sshd': {0: "off", 1: "off", 2: "off", 3: "off", 4: "off", 5: "off", 6: "off"},
                               'vsftpd': {0: "off", 1: "off", 2: "off", 3: "off", 4: "off", 5: "on", 6: "off"},
                               'xinetd': {'amanda': "off", 'chargen-dgram': "on"}}
    def test_enable(self):
        # NOTE(review): the local name shadows the imported `service`
        # module for the rest of this method.
        service = "lldpad"
        self.service_manager.enable(service)
        assert self.run_mock.call_args[0][0] == "chkconfig lldpad on"
    def test_unknown_runlevel(self):
        self.assertRaises(ValueError,
                          service.convert_sysv_runlevel, "unknown")
    def test_runlevels(self):
        assert service.convert_systemd_target_to_runlevel(
            "poweroff.target") == '0'
        assert service.convert_systemd_target_to_runlevel(
            "rescue.target") == 's'
        assert service.convert_systemd_target_to_runlevel(
            "multi-user.target") == '3'
        assert service.convert_systemd_target_to_runlevel(
            "graphical.target") == '5'
        assert service.convert_systemd_target_to_runlevel(
            "reboot.target") == '6'
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册