#!/bin/bash
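# checkall: run Avocado's static checks (inspekt lint/indent/style, module
# boundaries, Signed-off-by) and the selftests, collecting failed checks in
# the ERR array and summarizing them at the end.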
ERR=()
RESULTS_DIR=$(./scripts/avocado config | grep datadir.paths.logs_dir | awk '{print $2}')
# Very basic version of expanduser
RESULTS_DIR="${RESULTS_DIR/#\~/$HOME}"

run_rc() {
    CHECK=$1
    shift
    echo -e "\n\e[32mRunning '$1'\e[0m"
    eval $*
    if [ $? != 0 ]; then
        echo -e "\e[31m$CHECK FAILED\e[0m"
        ERR+=("$CHECK")
        [ ! "$SELF_CHECK_CONTINUOUS" ] && exit 1
    else
        echo -e "\e[32m$CHECK PASSED\e[0m\n"
    fi
}
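
# Usage sketch (mirrors the calls at the bottom of this script):
#   run_rc lint "inspekt lint --exclude=.git"
# runs the command via eval, reports PASSED/FAILED, records a failed check in
# ERR and, unless SELF_CHECK_CONTINUOUS is set, exits on the first failure.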


parallel_selftests() {
    local START=$(date +%s)
    local ERR=0
    local FIND_UNITTESTS=$(readlink -f ./contrib/scripts/avocado-find-unittests)
    local NO_WORKERS=$(($(grep -c processor /proc/cpuinfo) * 2))

    # The directories that may contain files with tests, from the Avocado core
    # and from all optional plugins
    declare -A DIR_GLOB_MAP
    DIR_GLOB_MAP[selftests]="selftests/unit/test_*.py selftests/functional/test_*.py selftests/doc/test_*.py"
    for PLUGIN in $(find optional_plugins -mindepth 1 -maxdepth 1 -type d); do
        DIR_GLOB_MAP[$PLUGIN]="tests/test_*.py"
    done;
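    # Illustrative resulting entries (actual plugin directories depend on the
    # checkout):
    #   DIR_GLOB_MAP[selftests]="selftests/unit/test_*.py ..."
    #   DIR_GLOB_MAP[optional_plugins/<plugin>]="tests/test_*.py"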

    declare -A TESTS
    for TEST_DIR in "${!DIR_GLOB_MAP[@]}"; do
        # Tests in the core "selftests" directory can be addressed by their
        # regular path.  For tests in optional plugins, though,
        # "python -m unittest module.class.test_name" only works after
        # changing into the plugin directory, because the full path is not a
        # valid Python module (it would require __init__.py files in places
        # where they do not make sense).
        if [ "x$TEST_DIR" != "xselftests" ]; then
            OLD_PWD=$PWD
            cd $TEST_DIR
        fi
        # Use sort -R to randomize the order, as the longer tests tend to
        # live in the same file
        THIS_DIR_TESTS=$(${FIND_UNITTESTS} ${DIR_GLOB_MAP[$TEST_DIR]} | sort -R)
        if [ -n "$THIS_DIR_TESTS" ]; then
            TESTS[$TEST_DIR]=${THIS_DIR_TESTS};
        fi
        if [ "x$TEST_DIR" != "xselftests" ]; then
            cd $OLD_PWD
        fi
    done

    for TEST_DIR in "${!TESTS[@]}"; do
        if [ "x$TEST_DIR" != "xselftests" ]; then
            OLD_PWD=$PWD
            cd $TEST_DIR
        fi

        declare -a ALL
        ALL=(${TESTS[$TEST_DIR]})

        local PER_SLICE=$((${#ALL[@]} / $NO_WORKERS))
        [ $PER_SLICE -eq 0 ] && PER_SLICE=1
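        # Worked example (hypothetical numbers): with 4 processors NO_WORKERS
        # is 8, so 200 collected tests give PER_SLICE=25 and the loop below
        # starts one background "python -m unittest" run per 25-test slice.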
        local PIDS=()
        local TMPS=()

        for I in $(seq 0 $PER_SLICE $((${#ALL[@]} - 1))); do
            TMP=$(mktemp /tmp/avocado_parallel_unittest_output_XXXXXX)
            TMPS+=("$TMP")
            ( python -m unittest ${ALL[@]:$I:$PER_SLICE} &> $TMP ) &
            PIDS+=("$!")
            sleep 0.1
        done

        FAILED_ONCE=()
        for I in $(seq 0 $((${#PIDS[@]} - 1))); do
            wait ${PIDS[$I]}
            RET=$?
            if [ $RET -ne 0 ]; then
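                # The sed below turns unittest failure headers such as
                # "FAIL: test_foo (unit.test_bar.BarTest)" (illustrative names)
                # into re-runnable ids like "unit.test_bar.BarTest.test_foo".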
                for FAILURE in $(sed -n 's/\(ERROR\|FAIL\): \([^ ]*\) (\([^)]*\)).*/\3.\2/p' "${TMPS[$I]}"); do
                    FAILED_ONCE+=("$FAILURE")
                done
                # On Python 3.4, load errors are not treated as test failures,
                # and we cannot easily tell which test failed to load, so
                # return immediately.
                if grep -q "AttributeError: 'module' object has no attribute" "${TMPS[$I]}"; then
                    echo
                    echo ----------------------------------------------------------------------
                    echo "ERROR: failed to load at least 1 test"
                    echo "Check the following log output for more information:"
                    cat "${TMPS[$I]}"
                    echo ----------------------------------------------------------------------
                    return 1;
                fi
            else
                rm ${TMPS[$I]}
            fi
        done
        if [ ${#FAILED_ONCE[@]} -gt 0 ]; then
            if [ ${#FAILED_ONCE[@]} -le 24 ]; then
                echo "${#FAILED_ONCE[@]} test(s) failed during parallel execution, trying them in series"
                echo "python -m unittest --failfast ${FAILED_ONCE[@]}"
                if python -m unittest --failfast ${FAILED_ONCE[@]}; then
                    echo "All failed tests passed when executed in series"
                    echo
                    for I in $(seq 0 $((${#PIDS[@]} - 1))); do
                        [ -e "${TMPS[$I]}" ] && rm "${TMPS[$I]}"
                    done
                else
                    echo
                    echo "Some test(s) failed in series as well, showing failures from parallel run:"
                    ERR=1
                fi
            else
                echo "${#FAILED_ONCE[@]} tests failed during execution, not trying to re-run them."
                ERR=1
            fi
        fi

        # Show the output of any remaining (failed) slices and remove their tmp files
        for I in $(seq 0 $((${#PIDS[@]} - 1))); do
            if [ -e "${TMPS[$I]}" ]; then
                echo
                echo python -m unittest ${ALL[@]:$(($I * $PER_SLICE)):$PER_SLICE}
                cat "${TMPS[$I]}"
                rm "${TMPS[$I]}"
            fi
        done
        echo ----------------------------------------------------------------------
        echo Ran ${#ALL[@]} tests for $TEST_DIR in $(($(date +%s) - START))s
        if [ "x$TEST_DIR" != "xselftests" ]; then
            cd $OLD_PWD
        fi
    done;
    return $ERR
}


signed_off_check() {
    AUTHOR="$(git log -1 --pretty='format:%aN <%aE>')"
    git log -1 --pretty=format:%B | grep "Signed-off-by: $AUTHOR"
    if [ $? != 0 ]; then
        echo "The commit message does not contain author's signature (Signed-off-by: $AUTHOR)"
        return 1
    fi
}
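
# Example of the trailer signed_off_check looks for (author name and email are
# hypothetical):
#   Signed-off-by: Jane Doe <jane@example.com>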


results_dir_content() {
    NOW="$(ls $RESULTS_DIR)"
    if [ "$(echo $* | xargs)" != "$(echo $NOW | xargs)" ]; then
        echo "The output of '$RESULTS_DIR' is not the same as before running the checks"
        echo "ORIGINAL:"
        echo "$*"
        echo "NOW:"
        echo "$NOW"
        return 1
    else
        echo "No extra files were created in '$RESULTS_DIR'"
        return 0
    fi
}
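
# results_dir_content receives the RESULTS_DIR_CONTENT snapshot taken below and
# is run again as the very last check, so any files the selftests leave behind
# in the real results directory show up as a difference.  Setting
# SKIP_RESULTSDIR_CHECK (e.g. SKIP_RESULTSDIR_CHECK=1 ./checkall, value is
# illustrative) skips both the snapshot and the comparison.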

[ "$SKIP_RESULTSDIR_CHECK" ] || RESULTS_DIR_CONTENT="$(ls $RESULTS_DIR 2> /dev/null)"

LINT_CMD="inspekt lint --exclude=.git"
PYLINT_ENABLE="--enable R0401,W0101,W0102,W0104,W0105,W0106,W0107,W0108,W0109,W0111,W0120,W0122,W0123,W0124,W0125,W0150,W0199,W0211,W0222,W0232,W0233,W0301,W0312,W0401,W0404,W0406,W0410,W0601,W0602,W0603,W0604,W0611,W0612,W0614,W0622,W0623,W0640,W0711,W1202,W1300,W1301,W1302,W1303,W1304,W1305,W1306,W1307,W1401,W1402,W1501,W1503,W1645"
if [ "$AVOCADO_PARALLEL_LINT" ]; then
    LINT_CMD="$LINT_CMD --parallel=$AVOCADO_PARALLEL_LINT"
fi
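
# Example invocation (worker count is illustrative):
#   AVOCADO_PARALLEL_LINT=4 ./checkall
# which makes the lint check run as "inspekt lint ... --parallel=4".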

run_rc lint "$LINT_CMD $PYLINT_ENABLE"

# Skip checking test_utils_cpu.py due to inspektor bug
run_rc indent 'inspekt indent --exclude=.git,selftests/unit/test_utils_cpu.py'
run_rc style 'inspekt style --exclude=.git --disable E501,E265,W601,E402,E722'
run_rc boundaries 'selftests/modules_boundaries'
run_rc signed-off-by signed_off_check
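
# Which selftests variant runs is selected by environment variables:
#   AVOCADO_PARALLEL_CHECK set -> parallel_selftests (unittests split across workers)
#   AVOCADO_SELF_CHECK unset   -> the plain selftests/run script (default)
#   AVOCADO_SELF_CHECK set     -> the selftests executed through scripts/avocado itself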
if [ "$AVOCADO_PARALLEL_CHECK" ]; then
    run_rc selftests parallel_selftests
elif [ -z "$AVOCADO_SELF_CHECK" ]; then
    run_rc selftests selftests/run
else
    CMD='scripts/avocado run --job-results-dir=$(mktemp -d) selftests/{unit,functional,doc}'
    [ ! "$SELF_CHECK_CONTINUOUS" ] && CMD+=" --failfast on"
    run_rc selftests "$CMD"
fi
[ "$SKIP_RESULTSDIR_CHECK" ] || run_rc job-results results_dir_content "$RESULTS_DIR_CONTENT"

if [ "$ERR" ]; then
    echo -e "\e[31m"
    echo "Checks:"
    for CHECK in "${ERR[@]}"; do
        echo -e " * $CHECK FAILED"
    done
    echo -ne "\e[0m"
else
    echo -e "\e[32mAll checks PASSED\e[0m"
fi
if [ "$ERR" ]; then
    exit 1
fi
exit 0