提交 c413b23b 编写于 作者: M Maximilian Michels

[FLINK-3383] move snapshot deployment from Travis CI to ASF Jenkins

Deployment will be handled by ASF's Jenkins instances which trigger
the deployment of snapshot versions every night. This way, we have not
only separated deployment from CI testing, but also improved the
reliability of the test and deployment infrastructure.

- remove Travis dependency from Maven deploy script

- set force-shading dependency version to the Flink version

This ensures that deployment works when only the credentials for the snapshot
deployment are available. Previously, we deployed snapshots with the release
credentials, which would have been over-privileged for this purpose.

- remove old credentials from Travis config file

This closes #1619.
上级 f158f3dc
......@@ -44,13 +44,6 @@ env:
global:
# Global variable to avoid hanging travis builds when downloading cache archives.
- MALLOC_ARENA_MAX=2
# username and password for Apache Nexus (maven deploy)
- secure: "Nu2oNTrIAmxIkNEZzALw+GT2QBogEh/mqecSqoKDCk0oFjUZhrnrsIZYD/8zTG9fAVa5Gx4uWH4W824Va5RlBZvCs9UTh5TF25K2ORR9dB9FiXZ+Vjjig78sKJF7N73WVIOsHCSKpoBKnVkvNwxuAkPTMYjn3sswRh1pMu2VQ90="
- secure: "jMllQXAHpE+ijYXjvQvh0xml6DCL5pmESuWRtd0Wi4v56HHxKHc/Tty/CJvX8whVDLaHNFtwlbaIN9asSyAu1OyGhpWCqsmsxWF4atvKFua1oX45XMB26Ymf7Yr7aq7lcx66j0cYpfBXY4tFTFPiT05QnZ8XsHzEnv4Tpgif2dg="
# New s3 deployment
- ARTIFACTS_S3_BUCKET="stratosphere-bin"
- secure: "AECzVxihEhYfnNcrY/wLirTkKkmSATycvTfKsBmxD07bg6BmaVgsOl4degUu4YL50e6agpoWul6irGxTg0bjLMAwg1ZGyRx57NFvNQ7JYDHK6EWmJ7BsK2WO7HiYzfau+ZAaL36WpOMi0UUPpuNXMvULqaE9b4jZqo1Wo/WDcyU="
- secure: "SNZkMm++fvPbjdreibc4j/XTKy7rOvGvjbvJJLQ01fVDR8ur1FGB5L/CE9tm2Aye75G8br+tJ+gf5cMX8CHL+0VrvuTk5U6flbuM08Pd0pmP64ZncmGfRFKC5qTvt24YR0u3cqxWze8RTkdduz0t8xrEdyCtb94CHs1+RNS+0HA="
# Build artifacts like logs (variables for apache/flink repo)
- secure: "Fm3NK28qN8yLtpJl4VI58biBECpOodMYbYXPVWwa62R7jkhHl2U1s4Xa5ujEgNIDcsUsY66z0V4pU0Es0XLNOY2ajlaFOHTmngzFIXul1r4vuNy0H8okEBjs9Ks0TOWYrE6ndAv1J4/oUsRtehayrriaehn31emXL9c4RSKgaiQ="
- secure: "CGcWDpoPLKVPVxFCa+rh5svyrSy7tWTsydsFuLlw5BH5QR57FWH0P5ZBZ31MPppoNNpKEp1V5PBxOH0dUAx8SVNWQFNCsQrOwVpTnTlyl3Cd1udj2hahbB3l+IGf0+O3v2vv6blYm6vJb98NqzZknjdIefDDBfu52ndJy1UqHQw="
......@@ -62,7 +55,3 @@ before_script:
# We run mvn and monitor its output. If there is no output for the specified number of seconds, we
# print the stack traces of all running Java processes.
script: "./tools/travis_mvn_watchdog.sh 300"
# deploy if the first job is successful; should be replaced by an after_all_success if travis finally supports it
after_success:
- "./tools/deploy_to_maven.sh"
......@@ -110,7 +110,7 @@ under the License.
<dependency>
<groupId>org.apache.flink</groupId>
<artifactId>force-shading</artifactId>
<version>1.0.0</version>
<version>1.1-SNAPSHOT</version>
</dependency>
<dependency>
......
......@@ -17,16 +17,9 @@
# limitations under the License.
################################################################################
#Please ask @rmetzger (on GitHub) before changing anything here. It contains some magic.
# Build Responsibilities
# 1. Nothing
# 2. Nothing
# 3. Nothing
# 4. Deploy snapshot & S3 (hadoop2)
# 5. Deploy snapshot & S3 (hadoop1)
#
# Deploys snapshot builds to Apache's snapshot repository.
#
function getVersion() {
here="`dirname \"$0\"`" # relative
......@@ -70,65 +63,55 @@ function deploy_to_s3() {
pwd
# Check if push/commit is eligible for deploying
echo "Job: $TRAVIS_JOB_NUMBER ; isPR: $TRAVIS_PULL_REQUEST ; repo slug : $TRAVIS_REPO_SLUG ; branch: $TRAVIS_BRANCH "
if [[ $TRAVIS_PULL_REQUEST == "false" ]] && [[ $TRAVIS_REPO_SLUG == "apache/flink" ]] && ( [[ $TRAVIS_BRANCH == "master" ]] || [[ $TRAVIS_BRANCH == "release-"* ]]); then
echo "install lifecylce mapping fake plugin"
git clone https://github.com/mfriedenhagen/dummy-lifecycle-mapping-plugin.git
cd dummy-lifecycle-mapping-plugin
mvn -B install
cd ..
echo "install lifecylce mapping fake plugin"
git clone https://github.com/mfriedenhagen/dummy-lifecycle-mapping-plugin.git
cd dummy-lifecycle-mapping-plugin
mvn -B install
cd ..
# this will take a while
CURRENT_FLINK_VERSION=`getVersion`
if [[ "$CURRENT_FLINK_VERSION" == *-SNAPSHOT ]]; then
CURRENT_FLINK_VERSION_HADOOP1=${CURRENT_FLINK_VERSION/-SNAPSHOT/-hadoop1-SNAPSHOT}
else
CURRENT_FLINK_VERSION_HADOOP1="$CURRENT_FLINK_VERSION-hadoop1"
fi
# this will take a while
CURRENT_FLINK_VERSION=`getVersion`
if [[ "$CURRENT_FLINK_VERSION" == *-SNAPSHOT ]]; then
CURRENT_FLINK_VERSION_HADOOP1=${CURRENT_FLINK_VERSION/-SNAPSHOT/-hadoop1-SNAPSHOT}
else
CURRENT_FLINK_VERSION_HADOOP1="$CURRENT_FLINK_VERSION-hadoop1"
fi
echo "detected current version as: '$CURRENT_FLINK_VERSION' ; hadoop1: $CURRENT_FLINK_VERSION_HADOOP1 "
#
# This script deploys our project to sonatype SNAPSHOTS.
# It will deploy both a hadoop v1 and a hadoop v2 (yarn) artifact
#
echo "detected current version as: '$CURRENT_FLINK_VERSION' ; hadoop1: $CURRENT_FLINK_VERSION_HADOOP1 "
if [[ $CURRENT_FLINK_VERSION == *SNAPSHOT* ]] ; then
# Deploy hadoop v1 to maven
echo "Generating poms for hadoop1"
./tools/generate_specific_pom.sh $CURRENT_FLINK_VERSION $CURRENT_FLINK_VERSION_HADOOP1 pom.hadoop1.xml
mvn -B -f pom.hadoop1.xml -DskipTests -Drat.ignoreErrors=true deploy --settings deploysettings.xml
#
# This script is called by travis to deploy our project to sonatype SNAPSHOTS.
# It will deploy both a hadoop v1 and a hadoop v2 (yarn) artifact
#
# deploy to s3
deploy_to_s3 $CURRENT_FLINK_VERSION "hadoop1"
if [[ $TRAVIS_JOB_NUMBER == *5 ]] && [[ $CURRENT_FLINK_VERSION == *SNAPSHOT* ]] ; then
# Deploy hadoop v1 to maven
echo "Generating poms for hadoop1"
./tools/generate_specific_pom.sh $CURRENT_FLINK_VERSION $CURRENT_FLINK_VERSION_HADOOP1 pom.hadoop1.xml
mvn -B -f pom.hadoop1.xml -DskipTests -Drat.ignoreErrors=true deploy --settings deploysettings.xml;
# deploy hadoop v2 (yarn)
echo "deploy standard version (hadoop2) for scala 2.10"
# deploy to s3
deploy_to_s3 $CURRENT_FLINK_VERSION "hadoop1"
fi
# hadoop2 scala 2.10
mvn -B -DskipTests -Drat.skip=true -Drat.ignoreErrors=true clean deploy --settings deploysettings.xml
if [[ $TRAVIS_JOB_NUMBER == *4 ]] && [[ $CURRENT_FLINK_VERSION == *SNAPSHOT* ]] ; then
# the time to build and upload flink twice (scala 2.10 and scala 2.11) takes
# too much time. That's why we are going to do it in parallel
# Note that the parallel execution will cause the output to be interleaved
mkdir ../flink2
ls ../
cp -r . ../flink2
cd ../flink2
# deploy hadoop v2 (yarn)
echo "deploy standard version (hadoop2) for scala 2.10 from flink2 directory"
# do the hadoop2 scala 2.10 in the background
(mvn -B -DskipTests -Drat.skip=true -Drat.ignoreErrors=true clean deploy --settings deploysettings.xml; deploy_to_s3 $CURRENT_FLINK_VERSION "hadoop2" ) &
# switch back to the regular flink directory
cd ../flink
echo "deploy hadoop2 version (standard) for scala 2.11 from flink directory"
./tools/change-scala-version.sh 2.11
mvn -B -DskipTests -Drat.skip=true -Drat.ignoreErrors=true clean deploy --settings deploysettings.xml;
deploy_to_s3 $CURRENT_FLINK_VERSION "hadoop2_2.11"
echo "Changing back to scala 2.10"
./tools/change-scala-version.sh 2.10
fi
fi # pull request check
deploy_to_s3 $CURRENT_FLINK_VERSION "hadoop2"
echo "deploy hadoop2 version (standard) for scala 2.11"
./tools/change-scala-version.sh 2.11
mvn -B -DskipTests -Drat.skip=true -Drat.ignoreErrors=true clean deploy --settings deploysettings.xml
deploy_to_s3 $CURRENT_FLINK_VERSION "hadoop2_2.11"
echo "Changing back to scala 2.10"
./tools/change-scala-version.sh 2.10
exit 0
else
exit 1
fi
......@@ -38,7 +38,7 @@ under the License.
<groupId>org.apache.flink</groupId>
<artifactId>force-shading</artifactId>
<version>1.0.0</version>
<version>1.1-SNAPSHOT</version>
<packaging>jar</packaging>
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册