
Add the required boilerplate to run presubmit tests in Prow.

This adds a single dummy test (required to make dep fetch the dependencies),
and then the Gopkg.toml configuration so we grab the presubmit helper scripts.

This will eventually use the code in plumbing instead of knative, but that isn't
ready yet.
Dan Lorenc 2019-05-13 09:17:40 -05:00 committed by tekton-robot
parent 01b17f5954
commit acb7ca4c0f
13 changed files with 2397 additions and 0 deletions

Gopkg.lock (generated, new file)

@@ -0,0 +1,17 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
digest = "1:92011435e48a9fed9f9e8afa03afecb5183ce15ff76f0ad108ea59751a15a080"
name = "github.com/knative/test-infra"
packages = ["scripts"]
pruneopts = "UT"
revision = "51f0bf3fa4146984c05cd1846dddf10a5908f167"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = ["github.com/knative/test-infra/scripts"]
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (new file)

@@ -0,0 +1,38 @@
# Gopkg.toml example
#
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
#
# [prune]
# non-go = false
# go-tests = true
# unused-packages = true
required = [
"github.com/knative/test-infra/scripts",
]
[[prune.project]]
name = "github.com/knative/test-infra"
non-go = false
[prune]
non-go = true
go-tests = true
unused-packages = true
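With this `Gopkg.toml` in place, the vendored `knative/test-infra` scripts below would typically be fetched or refreshed with dep; a minimal sketch, assuming the `dep` CLI is installed:

```bash
# Populate/refresh vendor/ as declared in Gopkg.toml, including the
# knative/test-infra helper scripts required above.
dep ensure

# Later, to pick up newer helper scripts from upstream:
dep ensure -update github.com/knative/test-infra
```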

test/integration_test.go (new file)

@@ -0,0 +1,23 @@
/*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import "testing"
func TestDummy(t *testing.T) {
}
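The dummy test above exists only so that dep sees a buildable Go package under `test/`; running it locally is trivial. A sketch, assuming a standard Go/dep workspace:

```bash
# Compiles the test/ package and runs TestDummy (a no-op).
go test ./test/...
```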

test/presubmit-tests.sh (new executable file)

@@ -0,0 +1,32 @@
#!/usr/bin/env bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the presubmit tests; it is started by prow for each PR.
# For convenience, it can also be executed manually.
# Running the script without parameters, or with the --all-tests
# flag, causes all tests to be executed, in the right order.
# Use the flags --build-tests, --unit-tests and --integration-tests
# to run a specific set of tests.
# Markdown linting failures don't show up properly in Gubernator, resulting
# in a net-negative contributor experience.
export DISABLE_MD_LINTING=1
source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
# We use the default build, unit and integration test runners.
main $@

vendor/github.com/knative/test-infra/LICENSE (generated, vendored, new file)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/knative/test-infra/scripts/README.md (generated, vendored, new file)

@@ -0,0 +1,274 @@
# Helper scripts
This directory contains helper scripts used by Prow test jobs, as well as
local development scripts.
## Using the `presubmit-tests.sh` helper script
This is a helper script to run the presubmit tests. To use it:
1. Source this script.
1. [optional] Define the function `build_tests()`. If you don't define this
function, the default action for running the build tests is to:
- check markdown files
- run `go build` on the entire repo
- run `/hack/verify-codegen.sh` (if it exists)
- check licenses in all go packages
The markdown link checker tool doesn't check `localhost` links by default.
Its configuration file, `markdown-link-check-config.rc`, lives in the
`test-infra/scripts` directory. To override it, create a file with the same
name, containing your custom configuration, in the `/test` directory.
The markdown lint tool ignores long lines by default. Its configuration file,
`markdown-lint-config.rc`, lives in the `test-infra/scripts` directory. To
override it, create a file with the same name, containing your custom
configuration, in the `/test` directory.
1. [optional] Customize the default build test runner, if you're using it. Set
the following environment variables if the default values don't fit your needs:
- `DISABLE_MD_LINTING`: Disable linting markdown files, defaults to 0 (false).
- `DISABLE_MD_LINK_CHECK`: Disable checking links in markdown files, defaults
to 0 (false).
- `PRESUBMIT_TEST_FAIL_FAST`: Fail the presubmit test immediately if a test fails,
defaults to 0 (false).
1. [optional] Define the functions `pre_build_tests()` and/or
`post_build_tests()`. These functions will be called before or after the
build tests (either your custom one or the default action) and will cause
the test to fail if they don't return success.
1. [optional] Define the function `unit_tests()`. If you don't define this
function, the default action for running the unit tests is to run all go tests
in the repo.
1. [optional] Define the functions `pre_unit_tests()` and/or
`post_unit_tests()`. These functions will be called before or after the
unit tests (either your custom one or the default action) and will cause
the test to fail if they don't return success.
1. [optional] Define the function `integration_tests()`. If you don't define
this function, the default action for running the integration tests is to run
all `./test/e2e-*tests.sh` scripts, in sequence.
1. [optional] Define the functions `pre_integration_tests()` and/or
`post_integration_tests()`. These functions will be called before or after the
integration tests (either your custom one or the default action) and will cause
the test to fail if they don't return success.
1. Call the `main()` function passing `$@` (without quotes).
Running the script without parameters, or with the `--all-tests` flag causes
all tests to be executed, in the right order (i.e., build, then unit, then
integration tests).
Use the flags `--build-tests`, `--unit-tests` and `--integration-tests` to run
a specific set of tests. The flag `--emit-metrics` is used to emit metrics when
running the tests, and is automatically handled by the default action for
integration tests (see above).
The script will automatically skip all presubmit tests for PRs where all changed
files are exempt from tests (e.g., a PR changing only the `OWNERS` file).
Also, for PRs touching only markdown files, the unit and integration tests are
skipped.
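For example, invoking a repository's presubmit script with these flags might look like the following sketch (flag names as listed above):

```bash
# Run everything, in order: build, unit, then integration tests.
./test/presubmit-tests.sh --all-tests

# Run only the build and unit tests.
./test/presubmit-tests.sh --build-tests --unit-tests
```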
### Sample presubmit test script
```bash
source vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
function post_build_tests() {
echo "Cleaning up after build tests"
rm -fr ./build-cache
}
function unit_tests() {
make -C tests test
}
function pre_integration_tests() {
echo "Cleaning up before integration tests"
rm -fr ./staging-area
}
# We use the default integration test runner.
main $@
```
## Using the `e2e-tests.sh` helper script
This is a helper script for Knative E2E test scripts. To use it:
1. [optional] Customize the test cluster. Set the following environment variables
if the default values don't fit your needs:
- `E2E_CLUSTER_REGION`: Cluster region, defaults to `us-central1`.
- `E2E_CLUSTER_BACKUP_REGIONS`: Space-separated list of regions to retry test cluster creation in case of stockout. Defaults to `us-west1 us-east1`.
- `E2E_CLUSTER_ZONE`: Cluster zone (e.g., `a`), defaults to none (i.e. use a regional
cluster).
- `E2E_CLUSTER_BACKUP_ZONES`: Space-separated list of zones to retry test cluster creation in case of stockout. If defined, `E2E_CLUSTER_BACKUP_REGIONS` will be ignored thus it defaults to none.
- `E2E_CLUSTER_MACHINE`: Cluster node machine type, defaults to `n1-standard-4`.
- `E2E_MIN_CLUSTER_NODES`: Minimum number of nodes in the cluster when autoscaling,
defaults to 1.
- `E2E_MAX_CLUSTER_NODES`: Maximum number of nodes in the cluster when autoscaling,
defaults to 3.
1. Source the script.
1. [optional] Write the `knative_setup()` function, which will set up your
system under test (e.g., Knative Serving). This function won't be called if you
use the `--skip-knative-setup` flag.
1. [optional] Write the `knative_teardown()` function, which will tear down your
system under test (e.g., Knative Serving). This function won't be called if you
use the `--skip-knative-setup` flag.
1. [optional] Write the `test_setup()` function, which will set up the test
resources.
1. [optional] Write the `test_teardown()` function, which will tear down the test
resources.
1. [optional] Write the `cluster_setup()` function, which will set up any resources
before the test cluster is created.
1. [optional] Write the `cluster_teardown()` function, which will tear down any
resources after the test cluster is destroyed.
1. [optional] Write the `dump_extra_cluster_state()` function. It will be
called when a test fails, and can dump extra information about the current state
of the cluster (typically using `kubectl`).
1. [optional] Write the `parse_flags()` function. It will be called whenever an
unrecognized flag is passed to the script, allowing you to define your own flags.
The function must return 0 if the flag is unrecognized, or the number of items
to skip in the command line if the flag was parsed successfully. For example,
return 1 for a simple flag, and 2 for a flag with a parameter.
1. Call the `initialize()` function passing `$@` (without quotes).
1. Write logic for the end-to-end tests. Run all go tests using `go_test_e2e()`
(or `report_go_test()` if you need a more fine-grained control) and call
`fail_test()` or `success()` if any of them failed. The environment variable
`KO_DOCKER_REPO` and `E2E_PROJECT_ID` will be set according to the test cluster.
You can also use the following boolean (0 is false, 1 is true) environment
variables for the logic:
- `EMIT_METRICS`: true if `--emit-metrics` was passed.
All environment variables above are marked read-only.
**Notes:**
1. Calling your script without arguments will create a new cluster in the GCP
project `$PROJECT_ID` and run the tests against it.
1. Calling your script with `--run-tests` and the variable `KO_DOCKER_REPO` set
will immediately start the tests against the cluster currently configured for
`kubectl`.
1. By default, Istio is installed on the cluster via the GKE add-on; use `--skip-istio-addon` if
you choose not to have it preinstalled.
1. You can force running the tests against a specific GKE cluster version by using
the `--cluster-version` flag and passing a full version as the flag value.
### Sample end-to-end test script
This script will test that the latest Knative Serving nightly release works. It
defines a special flag (`--no-knative-wait`) that causes the script not to
wait for Knative Serving to be up before running the tests. It also requires that
the test cluster is created in a specific region, `us-west2`.
```bash
# This test requires a cluster in LA
E2E_CLUSTER_REGION=us-west2
source vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
function knative_setup() {
start_latest_knative_serving
if (( WAIT_FOR_KNATIVE )); then
wait_until_pods_running knative-serving || fail_test "Knative Serving not up"
fi
}
function parse_flags() {
if [[ "$1" == "--no-knative-wait" ]]; then
WAIT_FOR_KNATIVE=0
return 1
fi
return 0
}
WAIT_FOR_KNATIVE=1
initialize $@
# TODO: use go_test_e2e to run the tests.
kubectl get pods || fail_test
success
```
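The sample `parse_flags()` above handles a simple boolean flag and returns 1. A flag that takes a value would consume two command-line items and return 2; a sketch, using a hypothetical `--serving-version` flag:

```bash
function parse_flags() {
  if [[ "$1" == "--serving-version" ]]; then
    # Hypothetical flag with one argument: consume the flag and its value.
    SERVING_VERSION="$2"
    return 2
  fi
  # Not ours: let the standard flag parser handle it.
  return 0
}
```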
## Using the `release.sh` helper script
This is a helper script for Knative release scripts. To use it:
1. Source the script.
1. [optional] By default, the release script will run `./test/presubmit-tests.sh`
as the release validation tests. If you need to run something else, set the
environment variable `VALIDATION_TESTS` to the executable to run.
1. Write logic for building the release in a function named `build_release()`.
Set the environment variable `YAMLS_TO_PUBLISH` to the list of yaml files created,
space separated. Use the following boolean (0 is false, 1 is true) and string
environment variables for the logic:
- `RELEASE_VERSION`: contains the release version if `--version` was passed. This
also overrides the value of the `TAG` variable as `v<version>`.
- `RELEASE_BRANCH`: contains the release branch if `--branch` was passed. Otherwise
it's empty and `master` HEAD will be considered the release branch.
- `RELEASE_NOTES`: contains the filename with the release notes if `--release-notes`
was passed. The release notes file is a simple markdown file.
- `RELEASE_GCS_BUCKET`: contains the GCS bucket name to store the manifests if
`--release-gcs` was passed, otherwise the default value `knative-nightly/<repo>`
will be used. It is empty if `--publish` was not passed.
- `KO_DOCKER_REPO`: contains the GCR to store the images if `--release-gcr` was
passed, otherwise the default value `gcr.io/knative-nightly` will be used. It
is set to `ko.local` if `--publish` was not passed.
- `SKIP_TESTS`: true if `--skip-tests` was passed. This is handled automatically.
- `TAG_RELEASE`: true if `--tag-release` was passed. In this case, the environment
variable `TAG` will contain the release tag in the form `vYYYYMMDD-<commit_short_hash>`.
- `PUBLISH_RELEASE`: true if `--publish` was passed. In this case, the environment
variable `KO_FLAGS` will be updated with the `-L` option.
- `PUBLISH_TO_GITHUB`: true if `--version`, `--branch` and `--publish-release`
were passed.
All boolean environment variables default to false for safety.
All environment variables above, except `KO_FLAGS`, are marked read-only once
`main()` is called (see below).
1. Call the `main()` function passing `$@` (without quotes).
### Sample release script
```bash
source vendor/github.com/knative/test-infra/scripts/release.sh
function build_release() {
# config/ contains the manifests
ko resolve ${KO_FLAGS} -f config/ > release.yaml
YAMLS_TO_PUBLISH="release.yaml"
}
main $@
```
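A release script built this way is driven by the flags described above; for instance, a nightly-style run versus a versioned release might look like the following sketch (the `hack/release.sh` path is hypothetical):

```bash
# Tag and publish a nightly-style release (tag of the form vYYYYMMDD-<commit>).
./hack/release.sh --tag-release --publish

# Cut a versioned release from a release branch, skipping the validation tests.
./hack/release.sh --version 0.6.0 --branch release-0.6 --skip-tests --publish
```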

vendor/github.com/knative/test-infra/scripts/dummy.go (generated, vendored, new file)

@@ -0,0 +1,26 @@
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scripts
import (
"fmt"
)
func main() {
fmt.Println("This is a dummy go file so `go dep` can be used with knative/test-infra/scripts")
fmt.Println("This file can be safely removed if one day this directory contains real, useful go code")
}

vendor/github.com/knative/test-infra/scripts/e2e-tests.sh (generated, vendored, new executable file)

@@ -0,0 +1,435 @@
#!/bin/bash
# Copyright 2019 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative E2E test scripts.
# See README.md for instructions on how to use it.
source $(dirname ${BASH_SOURCE})/library.sh
# Build a resource name based on $E2E_BASE_NAME, a suffix and $BUILD_NUMBER.
# Restricts the name length to 40 chars (the limit for resource names in GCP).
# Name will have the form $E2E_BASE_NAME-<PREFIX>$BUILD_NUMBER.
# Parameters: $1 - name suffix
function build_resource_name() {
local prefix=${E2E_BASE_NAME}-$1
local suffix=${BUILD_NUMBER}
# Restrict suffix length to 20 chars
if [[ -n "${suffix}" ]]; then
suffix=${suffix:${#suffix}<20?0:-20}
fi
local name="${prefix:0:20}${suffix}"
# Ensure name doesn't end with "-"
echo "${name%-}"
}
# Test cluster parameters
# Configurable parameters
# export E2E_CLUSTER_REGION and E2E_CLUSTER_ZONE as they're used in the cluster setup subprocess
export E2E_CLUSTER_REGION=${E2E_CLUSTER_REGION:-us-central1}
# By default we use regional clusters.
export E2E_CLUSTER_ZONE=${E2E_CLUSTER_ZONE:-}
# Default backup regions in case of stockouts; by default we don't fall back to a different zone in the same region
readonly E2E_CLUSTER_BACKUP_REGIONS=${E2E_CLUSTER_BACKUP_REGIONS:-us-west1 us-east1}
readonly E2E_CLUSTER_BACKUP_ZONES=${E2E_CLUSTER_BACKUP_ZONES:-}
readonly E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-n1-standard-4}
readonly E2E_GKE_ENVIRONMENT=${E2E_GKE_ENVIRONMENT:-prod}
readonly E2E_GKE_COMMAND_GROUP=${E2E_GKE_COMMAND_GROUP:-beta}
# Each knative repository may have a different cluster size requirement here,
# so we allow calling code to set these parameters. If they are not set we
# use some sane defaults.
readonly E2E_MIN_CLUSTER_NODES=${E2E_MIN_CLUSTER_NODES:-1}
readonly E2E_MAX_CLUSTER_NODES=${E2E_MAX_CLUSTER_NODES:-3}
readonly E2E_BASE_NAME="k${REPO_NAME}"
readonly E2E_CLUSTER_NAME=$(build_resource_name e2e-cls)
readonly E2E_NETWORK_NAME=$(build_resource_name e2e-net)
readonly TEST_RESULT_FILE=/tmp/${E2E_BASE_NAME}-e2e-result
# Flag whether test is using a boskos GCP project
IS_BOSKOS=0
# Tear down the test resources.
function teardown_test_resources() {
# On boskos, save time and don't teardown as the cluster will be destroyed anyway.
(( IS_BOSKOS )) && return
header "Tearing down test environment"
function_exists test_teardown && test_teardown
(( ! SKIP_KNATIVE_SETUP )) && function_exists knative_teardown && knative_teardown
# Delete the kubernetes source downloaded by kubetest
rm -fr kubernetes kubernetes.tar.gz
}
# Run the given E2E tests. Assume tests are tagged e2e, unless `-tags=XXX` is passed.
# Parameters: $1..$n - any go test flags, then directories containing the tests to run.
function go_test_e2e() {
local test_options=""
local go_options=""
(( EMIT_METRICS )) && test_options="-emitmetrics"
[[ ! " $@" == *" -tags="* ]] && go_options="-tags=e2e"
report_go_test -v -count=1 ${go_options} $@ ${test_options}
}
# Dump info about the test cluster. If dump_extra_cluster_info() is defined, calls it too.
# This is intended to be called when a test fails to provide debugging information.
function dump_cluster_state() {
echo "***************************************"
echo "*** E2E TEST FAILED ***"
echo "*** Start of information dump ***"
echo "***************************************"
echo ">>> All resources:"
kubectl get all --all-namespaces
echo ">>> Services:"
kubectl get services --all-namespaces
echo ">>> Events:"
kubectl get events --all-namespaces
function_exists dump_extra_cluster_state && dump_extra_cluster_state
echo "***************************************"
echo "*** E2E TEST FAILED ***"
echo "*** End of information dump ***"
echo "***************************************"
}
# On a Prow job, save some metadata about the test for Testgrid.
function save_metadata() {
(( ! IS_PROW )) && return
local geo_key="Region"
local geo_value="${E2E_CLUSTER_REGION}"
if [[ -n "${E2E_CLUSTER_ZONE}" ]]; then
geo_key="Zone"
geo_value="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
fi
local cluster_version="$(gcloud container clusters list --project=${E2E_PROJECT_ID} --format='value(currentMasterVersion)')"
cat << EOF > ${ARTIFACTS}/metadata.json
{
"E2E:${geo_key}": "${geo_value}",
"E2E:Machine": "${E2E_CLUSTER_MACHINE}",
"E2E:Version": "${cluster_version}",
"E2E:MinNodes": "${E2E_MIN_CLUSTER_NODES}",
"E2E:MaxNodes": "${E2E_MAX_CLUSTER_NODES}"
}
EOF
}
# Delete target pools and health checks that might have leaked.
# See https://github.com/knative/serving/issues/959 for details.
# TODO(adrcunha): Remove once the leak issue is resolved.
function delete_leaked_network_resources() {
# On boskos, don't bother with leaks as the janitor will delete everything in the project.
(( IS_BOSKOS )) && return
# Ensure we're using the GCP project used by kubetest
local gcloud_project="$(gcloud config get-value project)"
local http_health_checks="$(gcloud compute target-pools list \
--project=${gcloud_project} --format='value(healthChecks)' --filter="instances~-${E2E_CLUSTER_NAME}-" | \
grep httpHealthChecks | tr '\n' ' ')"
local target_pools="$(gcloud compute target-pools list \
--project=${gcloud_project} --format='value(name)' --filter="instances~-${E2E_CLUSTER_NAME}-" | \
tr '\n' ' ')"
if [[ -n "${target_pools}" ]]; then
echo "Found leaked target pools, deleting"
gcloud compute forwarding-rules delete -q --project=${gcloud_project} --region=${E2E_CLUSTER_REGION} ${target_pools}
gcloud compute target-pools delete -q --project=${gcloud_project} --region=${E2E_CLUSTER_REGION} ${target_pools}
fi
if [[ -n "${http_health_checks}" ]]; then
echo "Found leaked health checks, deleting"
gcloud compute http-health-checks delete -q --project=${gcloud_project} ${http_health_checks}
fi
}
# Create a test cluster with kubetest and call the current script again.
function create_test_cluster() {
# Fail fast during setup.
set -o errexit
set -o pipefail
if function_exists cluster_setup; then
cluster_setup || fail_test "cluster setup failed"
fi
echo "Cluster will have a minimum of ${E2E_MIN_CLUSTER_NODES} and a maximum of ${E2E_MAX_CLUSTER_NODES} nodes."
# Smallest cluster required to run the end-to-end-tests
local CLUSTER_CREATION_ARGS=(
--gke-create-command="container clusters create --quiet --enable-autoscaling --min-nodes=${E2E_MIN_CLUSTER_NODES} --max-nodes=${E2E_MAX_CLUSTER_NODES} --scopes=cloud-platform --enable-basic-auth --no-issue-client-certificate ${GKE_ADDONS} ${EXTRA_CLUSTER_CREATION_FLAGS[@]}"
--gke-shape={\"default\":{\"Nodes\":${E2E_MIN_CLUSTER_NODES}\,\"MachineType\":\"${E2E_CLUSTER_MACHINE}\"}}
--provider=gke
--deployment=gke
--cluster="${E2E_CLUSTER_NAME}"
--gcp-network="${E2E_NETWORK_NAME}"
--gke-environment="${E2E_GKE_ENVIRONMENT}"
--gke-command-group="${E2E_GKE_COMMAND_GROUP}"
--test=false
)
if (( ! IS_BOSKOS )); then
CLUSTER_CREATION_ARGS+=(--gcp-project=${GCP_PROJECT})
fi
# SSH keys are not used, but kubetest checks for their existence.
# Touch them so that if they don't exist, empty files are created to satisfy the check.
mkdir -p $HOME/.ssh
touch $HOME/.ssh/google_compute_engine.pub
touch $HOME/.ssh/google_compute_engine
# Assume test failed (see details in set_test_return_code()).
set_test_return_code 1
local gcloud_project="${GCP_PROJECT}"
[[ -z "${gcloud_project}" ]] && gcloud_project="$(gcloud config get-value project)"
echo "gcloud project is ${gcloud_project}"
echo "gcloud user is $(gcloud config get-value core/account)"
(( IS_BOSKOS )) && echo "Using boskos for the test cluster"
[[ -n "${GCP_PROJECT}" ]] && echo "GCP project for test cluster is ${GCP_PROJECT}"
echo "Test script is ${E2E_SCRIPT}"
# Set arguments for this script again
local test_cmd_args="--run-tests"
(( EMIT_METRICS )) && test_cmd_args+=" --emit-metrics"
(( SKIP_KNATIVE_SETUP )) && test_cmd_args+=" --skip-knative-setup"
[[ -n "${GCP_PROJECT}" ]] && test_cmd_args+=" --gcp-project ${GCP_PROJECT}"
[[ -n "${E2E_SCRIPT_CUSTOM_FLAGS[@]}" ]] && test_cmd_args+=" ${E2E_SCRIPT_CUSTOM_FLAGS[@]}"
local extra_flags=()
# If using boskos, save time and let it tear down the cluster
(( ! IS_BOSKOS )) && extra_flags+=(--down)
create_test_cluster_with_retries "${CLUSTER_CREATION_ARGS[@]}" \
--up \
--extract "${E2E_CLUSTER_VERSION}" \
--gcp-node-image "${SERVING_GKE_IMAGE}" \
--test-cmd "${E2E_SCRIPT}" \
--test-cmd-args "${test_cmd_args}" \
${extra_flags[@]} \
${EXTRA_KUBETEST_FLAGS[@]}
echo "Test subprocess exited with code $?"
# Ignore any errors below, this is a best-effort cleanup and shouldn't affect the test result.
set +o errexit
function_exists cluster_teardown && cluster_teardown
delete_leaked_network_resources
local result=$(get_test_return_code)
echo "Artifacts were written to ${ARTIFACTS}"
echo "Test result code is ${result}"
exit ${result}
}
# Retry backup regions/zones if cluster creations failed due to stockout.
# Parameters: $1..$n - any kubetest flags other than geo flag.
function create_test_cluster_with_retries() {
local cluster_creation_log=/tmp/${E2E_BASE_NAME}-cluster_creation-log
# zone_not_provided is a placeholder for e2e_cluster_zone to make the for loop below work
local zone_not_provided="zone_not_provided"
local e2e_cluster_regions=(${E2E_CLUSTER_REGION})
local e2e_cluster_zones=(${E2E_CLUSTER_ZONE})
if [[ -n "${E2E_CLUSTER_BACKUP_ZONES}" ]]; then
e2e_cluster_zones+=(${E2E_CLUSTER_BACKUP_ZONES})
elif [[ -n "${E2E_CLUSTER_BACKUP_REGIONS}" ]]; then
e2e_cluster_regions+=(${E2E_CLUSTER_BACKUP_REGIONS})
e2e_cluster_zones=(${zone_not_provided})
else
echo "No backup region/zone set, cluster creation will fail in case of stockout"
fi
for e2e_cluster_region in "${e2e_cluster_regions[@]}"; do
for e2e_cluster_zone in "${e2e_cluster_zones[@]}"; do
E2E_CLUSTER_REGION=${e2e_cluster_region}
E2E_CLUSTER_ZONE=${e2e_cluster_zone}
[[ "${E2E_CLUSTER_ZONE}" == "${zone_not_provided}" ]] && E2E_CLUSTER_ZONE=""
local geoflag="--gcp-region=${E2E_CLUSTER_REGION}"
[[ -n "${E2E_CLUSTER_ZONE}" ]] && geoflag="--gcp-zone=${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
header "Creating test cluster in $E2E_CLUSTER_REGION $E2E_CLUSTER_ZONE"
# Don't fail test for kubetest, as it might incorrectly report test failure
# if teardown fails (for details, see success() below)
set +o errexit
{ run_go_tool k8s.io/test-infra/kubetest \
kubetest "$@" ${geoflag}; } 2>&1 | tee ${cluster_creation_log}
# Exit if test succeeded
[[ "$(get_test_return_code)" == "0" ]] && return
# If test failed not because of cluster creation stockout, return
[[ -z "$(grep -Eio 'does not have enough resources available to fulfill the request' ${cluster_creation_log})" ]] && return
done
done
}
# Setup the test cluster for running the tests.
function setup_test_cluster() {
# Fail fast during setup.
set -o errexit
set -o pipefail
header "Setting up test cluster"
# Set the actual project the test cluster resides in
# It will be a project assigned by Boskos if the test is running on Prow,
# otherwise it will be ${GCP_PROJECT} set up by the user.
readonly export E2E_PROJECT_ID="$(gcloud config get-value project)"
# Save some metadata about cluster creation for use in Prow and Testgrid
save_metadata
local k8s_user=$(gcloud config get-value core/account)
local k8s_cluster=$(kubectl config current-context)
# If cluster admin role isn't set, this is a brand new cluster
# Setup the admin role and also KO_DOCKER_REPO
if [[ -z "$(kubectl get clusterrolebinding cluster-admin-binding 2> /dev/null)" ]]; then
acquire_cluster_admin_role ${k8s_user} ${E2E_CLUSTER_NAME} ${E2E_CLUSTER_REGION} ${E2E_CLUSTER_ZONE}
kubectl config set-context ${k8s_cluster} --namespace=default
export KO_DOCKER_REPO=gcr.io/${E2E_PROJECT_ID}/${E2E_BASE_NAME}-e2e-img
fi
echo "- Project is ${E2E_PROJECT_ID}"
echo "- Cluster is ${k8s_cluster}"
echo "- User is ${k8s_user}"
echo "- Docker is ${KO_DOCKER_REPO}"
export KO_DATA_PATH="${REPO_ROOT_DIR}/.git"
trap teardown_test_resources EXIT
# Handle failures ourselves, so we can dump useful info.
set +o errexit
set +o pipefail
if (( ! SKIP_KNATIVE_SETUP )) && function_exists knative_setup; then
knative_setup || fail_test "Knative setup failed"
fi
if function_exists test_setup; then
test_setup || fail_test "test setup failed"
fi
}
# Gets the exit code of the test script.
# For more details, see set_test_return_code().
function get_test_return_code() {
echo $(cat ${TEST_RESULT_FILE})
}
# Set the return code that the test script will return.
# Parameters: $1 - return code (0-255)
function set_test_return_code() {
# kubetest teardown might fail and thus incorrectly report failure of the
# script, even if the tests pass.
# We store the real test result to return it later, ignoring any teardown
# failure in kubetest.
# TODO(adrcunha): Get rid of this workaround.
echo -n "$1"> ${TEST_RESULT_FILE}
}
# Signal (as return code and in the logs) that all E2E tests passed.
function success() {
set_test_return_code 0
echo "**************************************"
echo "*** E2E TESTS PASSED ***"
echo "**************************************"
exit 0
}
# Exit test, dumping current state info.
# Parameters: $1 - error message (optional).
function fail_test() {
set_test_return_code 1
[[ -n $1 ]] && echo "ERROR: $1"
dump_cluster_state
exit 1
}
RUN_TESTS=0
EMIT_METRICS=0
SKIP_KNATIVE_SETUP=0
SKIP_ISTIO_ADDON=0
GCP_PROJECT=""
E2E_SCRIPT=""
E2E_CLUSTER_VERSION=""
GKE_ADDONS=""
EXTRA_CLUSTER_CREATION_FLAGS=()
EXTRA_KUBETEST_FLAGS=()
E2E_SCRIPT_CUSTOM_FLAGS=()
# Parse flags and initialize the test cluster.
function initialize() {
E2E_SCRIPT="$(get_canonical_path $0)"
E2E_CLUSTER_VERSION="${SERVING_GKE_VERSION}"
cd ${REPO_ROOT_DIR}
while [[ $# -ne 0 ]]; do
local parameter=$1
# Try parsing flag as a custom one.
if function_exists parse_flags; then
parse_flags $@
local skip=$?
if [[ ${skip} -ne 0 ]]; then
# Skip parsed flag (and possibly argument) and continue
# Also save it so it's passed through to the test script
for ((i=1;i<=skip;i++)); do
E2E_SCRIPT_CUSTOM_FLAGS+=("$1")
shift
done
continue
fi
fi
# Try parsing flag as a standard one.
case ${parameter} in
--run-tests) RUN_TESTS=1 ;;
--emit-metrics) EMIT_METRICS=1 ;;
--skip-knative-setup) SKIP_KNATIVE_SETUP=1 ;;
--skip-istio-addon) SKIP_ISTIO_ADDON=1 ;;
*)
[[ $# -ge 2 ]] || abort "missing parameter after $1"
shift
case ${parameter} in
--gcp-project) GCP_PROJECT=$1 ;;
--cluster-version) E2E_CLUSTER_VERSION=$1 ;;
--cluster-creation-flag) EXTRA_CLUSTER_CREATION_FLAGS+=($1) ;;
--kubetest-flag) EXTRA_KUBETEST_FLAGS+=($1) ;;
*) abort "unknown option ${parameter}" ;;
esac
esac
shift
done
# Use PROJECT_ID if set, unless --gcp-project was used.
if [[ -n "${PROJECT_ID:-}" && -z "${GCP_PROJECT}" ]]; then
echo "\$PROJECT_ID is set to '${PROJECT_ID}', using it to run the tests"
GCP_PROJECT="${PROJECT_ID}"
fi
if (( ! IS_PROW )) && [[ -z "${GCP_PROJECT}" ]]; then
abort "set \$PROJECT_ID or use --gcp-project to select the GCP project where the tests are run"
fi
(( IS_PROW )) && [[ -z "${GCP_PROJECT}" ]] && IS_BOSKOS=1
# Safety checks
is_protected_gcr ${KO_DOCKER_REPO} && \
abort "\$KO_DOCKER_REPO set to ${KO_DOCKER_REPO}, which is forbidden"
(( SKIP_ISTIO_ADDON )) || GKE_ADDONS="--addons=Istio"
readonly RUN_TESTS
readonly EMIT_METRICS
readonly GCP_PROJECT
readonly IS_BOSKOS
readonly EXTRA_CLUSTER_CREATION_FLAGS
readonly EXTRA_KUBETEST_FLAGS
readonly SKIP_KNATIVE_SETUP
readonly GKE_ADDONS
if (( ! RUN_TESTS )); then
create_test_cluster
else
setup_test_cluster
fi
}

vendor/github.com/knative/test-infra/scripts/library.sh (generated, vendored, new executable file)

@@ -0,0 +1,455 @@
#!/bin/bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a collection of useful bash functions and constants, intended
# to be used in test scripts and the like. It doesn't do anything when
# called from command line.
# Default GKE version to be used with Knative Serving
readonly SERVING_GKE_VERSION=gke-latest
readonly SERVING_GKE_IMAGE=cos
# Public latest stable nightly images and yaml files.
readonly KNATIVE_BASE_YAML_SOURCE=https://storage.googleapis.com/knative-nightly/@/latest
readonly KNATIVE_SERVING_RELEASE=${KNATIVE_BASE_YAML_SOURCE/@/serving}/serving.yaml
readonly KNATIVE_BUILD_RELEASE=${KNATIVE_BASE_YAML_SOURCE/@/build}/build.yaml
readonly KNATIVE_EVENTING_RELEASE=${KNATIVE_BASE_YAML_SOURCE/@/eventing}/release.yaml
# Conveniently set GOPATH if unset
if [[ -z "${GOPATH:-}" ]]; then
export GOPATH="$(go env GOPATH)"
if [[ -z "${GOPATH}" ]]; then
echo "WARNING: GOPATH not set and go binary unable to provide it"
fi
fi
# Useful environment variables
[[ -n "${PROW_JOB_ID:-}" ]] && IS_PROW=1 || IS_PROW=0
readonly IS_PROW
readonly REPO_ROOT_DIR="$(git rev-parse --show-toplevel)"
readonly REPO_NAME="$(basename ${REPO_ROOT_DIR})"
# Set ARTIFACTS to an empty temp dir if unset
if [[ -z "${ARTIFACTS:-}" ]]; then
export ARTIFACTS="$(mktemp -d)"
fi
# On a Prow job, redirect stderr to stdout so it's synchronously added to log
(( IS_PROW )) && exec 2>&1
# Print error message and exit 1
# Parameters: $1..$n - error message to be displayed
function abort() {
echo "error: $@"
exit 1
}
# Display a box banner.
# Parameters: $1 - character to use for the box.
# $2 - banner message.
function make_banner() {
local msg="$1$1$1$1 $2 $1$1$1$1"
local border="${msg//[-0-9A-Za-z _.,\/()]/$1}"
echo -e "${border}\n${msg}\n${border}"
}
# Simple header for logging purposes.
function header() {
local upper="$(echo $1 | tr a-z A-Z)"
make_banner "=" "${upper}"
}
# Simple subheader for logging purposes.
function subheader() {
make_banner "-" "$1"
}
# Simple warning banner for logging purposes.
function warning() {
make_banner "!" "$1"
}
# Checks whether the given function exists.
function function_exists() {
[[ "$(type -t $1)" == "function" ]]
}
# Waits until the given object doesn't exist.
# Parameters: $1 - the kind of the object.
# $2 - object's name.
# $3 - namespace (optional).
function wait_until_object_does_not_exist() {
local KUBECTL_ARGS="get $1 $2"
local DESCRIPTION="$1 $2"
if [[ -n $3 ]]; then
KUBECTL_ARGS="get -n $3 $1 $2"
DESCRIPTION="$1 $3/$2"
fi
echo -n "Waiting until ${DESCRIPTION} does not exist"
for i in {1..150}; do # timeout after 5 minutes
if ! kubectl ${KUBECTL_ARGS} > /dev/null 2>&1; then
echo -e "\n${DESCRIPTION} does not exist"
return 0
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: timeout waiting for ${DESCRIPTION} not to exist"
kubectl ${KUBECTL_ARGS}
return 1
}
# Waits until all pods are running in the given namespace.
# Parameters: $1 - namespace.
function wait_until_pods_running() {
echo -n "Waiting until all pods in namespace $1 are up"
for i in {1..150}; do # timeout after 5 minutes
local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)"
# All pods must be running
local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l)
if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then
local all_ready=1
while read pod ; do
local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`)
# All containers must be ready
[[ -z ${status[0]} ]] && all_ready=0 && break
[[ -z ${status[1]} ]] && all_ready=0 && break
[[ ${status[0]} -lt 1 ]] && all_ready=0 && break
[[ ${status[1]} -lt 1 ]] && all_ready=0 && break
[[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break
done <<< "$(echo "${pods}" | grep -v Completed)"
if (( all_ready )); then
echo -e "\nAll pods are up:\n${pods}"
return 0
fi
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}"
return 1
}
# Waits until all batch jobs complete in the given namespace.
# Parameters: $1 - namespace.
function wait_until_batch_job_complete() {
echo -n "Waiting until all batch jobs in namespace $1 run to completion."
for i in {1..150}; do # timeout after 5 minutes
local jobs=$(kubectl get jobs -n $1 --no-headers \
-ocustom-columns='n:{.metadata.name},c:{.spec.completions},s:{.status.succeeded}')
# All jobs must be complete
local not_complete=$(echo "${jobs}" | awk '{if ($2!=$3) print $0}' | wc -l)
if [[ ${not_complete} -eq 0 ]]; then
echo -e "\nAll jobs are complete:\n${jobs}"
return 0
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: timeout waiting for jobs to complete\n${jobs}"
return 1
}
# Waits until the given service has an external address (IP/hostname).
# Parameters: $1 - namespace.
# $2 - service name.
function wait_until_service_has_external_ip() {
echo -n "Waiting until service $2 in namespace $1 has an external address (IP/hostname)"
for i in {1..150}; do # timeout after 15 minutes
local ip=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
if [[ -n "${ip}" ]]; then
echo -e "\nService $2.$1 has IP $ip"
return 0
fi
local hostname=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
if [[ -n "${hostname}" ]]; then
echo -e "\nService $2.$1 has hostname $hostname"
return 0
fi
echo -n "."
sleep 6
done
echo -e "\n\nERROR: timeout waiting for service $2.$1 to have an external address"
kubectl get pods -n $1
return 1
}
# Waits for the endpoint to be routable.
# Parameters: $1 - External ingress IP address.
# $2 - cluster hostname.
function wait_until_routable() {
echo -n "Waiting until cluster $2 at $1 has a routable endpoint"
for i in {1..150}; do # timeout after 5 minutes
local val=$(curl -H "Host: $2" "http://$1" 2>/dev/null)
if [[ -n "$val" ]]; then
echo -e "\nEndpoint is now routable"
return 0
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: Timed out waiting for endpoint to be routable"
return 1
}
# Returns the name of the first pod of the given app.
# Parameters: $1 - app name.
# $2 - namespace (optional).
function get_app_pod() {
local pods=($(get_app_pods $1 $2))
echo "${pods[0]}"
}
# Returns the name of all pods of the given app.
# Parameters: $1 - app name.
# $2 - namespace (optional).
function get_app_pods() {
local namespace=""
[[ -n $2 ]] && namespace="-n $2"
kubectl get pods ${namespace} --selector=app=$1 --output=jsonpath="{.items[*].metadata.name}"
}
# Capitalize the first letter of each word.
# Parameters: $1..$n - words to capitalize.
function capitalize() {
local capitalized=()
for word in $@; do
local initial="$(echo ${word:0:1}| tr 'a-z' 'A-Z')"
capitalized+=("${initial}${word:1}")
done
echo "${capitalized[@]}"
}
# Dumps pod logs for the given app.
# Parameters: $1 - app name.
# $2 - namespace.
function dump_app_logs() {
echo ">>> ${REPO_NAME_FORMATTED} $1 logs:"
for pod in $(get_app_pods "$1" "$2")
do
echo ">>> Pod: $pod"
kubectl -n "$2" logs "$pod" -c "$1"
done
}
# Sets the given user as cluster admin.
# Parameters: $1 - user
# $2 - cluster name
# $3 - cluster region
# $4 - cluster zone, optional
function acquire_cluster_admin_role() {
echo "Acquiring cluster-admin role for user '$1'"
local geoflag="--region=$3"
[[ -n $4 ]] && geoflag="--zone=$3-$4"
# Get the password of the admin and use it, as the service account (or the user)
# might not have the necessary permission.
local password=$(gcloud --format="value(masterAuth.password)" \
container clusters describe $2 ${geoflag})
if [[ -n "${password}" ]]; then
# Cluster created with basic authentication
kubectl config set-credentials cluster-admin \
--username=admin --password=${password}
else
local cert=$(mktemp)
local key=$(mktemp)
echo "Certificate in ${cert}, key in ${key}"
gcloud --format="value(masterAuth.clientCertificate)" \
container clusters describe $2 ${geoflag} | base64 -d > ${cert}
gcloud --format="value(masterAuth.clientKey)" \
container clusters describe $2 ${geoflag} | base64 -d > ${key}
kubectl config set-credentials cluster-admin \
--client-certificate=${cert} --client-key=${key}
fi
kubectl config set-context $(kubectl config current-context) \
--user=cluster-admin
kubectl create clusterrolebinding cluster-admin-binding \
--clusterrole=cluster-admin \
--user=$1
# Reset back to the default account
gcloud container clusters get-credentials \
$2 ${geoflag} --project $(gcloud config get-value project)
}
# Runs a go test and generate a junit summary.
# Parameters: $1... - parameters to go test
function report_go_test() {
# Run tests in verbose mode to capture details.
# go doesn't like repeating -v, so remove if passed.
local args=" $@ "
local go_test="go test -race -v ${args/ -v / }"
# Just run regular go tests if not on Prow.
echo "Running tests with '${go_test}'"
local report=$(mktemp)
${go_test} | tee ${report}
local failed=( ${PIPESTATUS[@]} )
[[ ${failed[0]} -eq 0 ]] && failed=${failed[1]} || failed=${failed[0]}
echo "Finished run, return code is ${failed}"
# Install go-junit-report if necessary.
run_go_tool github.com/jstemmer/go-junit-report go-junit-report --help > /dev/null 2>&1
local xml=$(mktemp ${ARTIFACTS}/junit_XXXXXXXX.xml)
cat ${report} \
| go-junit-report \
| sed -e "s#\"github.com/knative/${REPO_NAME}/#\"#g" \
> ${xml}
echo "XML report written to ${xml}"
if (( ! IS_PROW )); then
# Keep the suffix, so files are related.
local logfile=${xml/junit_/go_test_}
logfile=${logfile/.xml/.log}
cp ${report} ${logfile}
echo "Test log written to ${logfile}"
fi
return ${failed}
}
# Install the latest stable Knative/serving in the current cluster.
function start_latest_knative_serving() {
header "Starting Knative Serving"
subheader "Installing Knative Serving"
echo "Installing Serving from ${KNATIVE_SERVING_RELEASE}"
kubectl apply -f ${KNATIVE_SERVING_RELEASE} || return 1
wait_until_pods_running knative-serving || return 1
}
# Run a go tool, installing it first if necessary.
# Parameters: $1 - tool package/dir for go get/install.
# $2 - tool to run.
# $3..$n - parameters passed to the tool.
function run_go_tool() {
local tool=$2
if [[ -z "$(which ${tool})" ]]; then
local action=get
[[ $1 =~ ^[\./].* ]] && action=install
go ${action} $1
fi
shift 2
${tool} "$@"
}
# Run dep-collector to update licenses.
# Parameters: $1 - output file, relative to repo root dir.
# $2...$n - directories and files to inspect.
function update_licenses() {
cd ${REPO_ROOT_DIR} || return 1
local dst=$1
shift
run_go_tool ./vendor/github.com/knative/test-infra/tools/dep-collector dep-collector $@ > ./${dst}
}
# Run dep-collector to check for forbidden licenses.
# Parameters: $1...$n - directories and files to inspect.
function check_licenses() {
# Fetch the google/licenseclassifier for its license db
go get -u github.com/google/licenseclassifier
# Check that we don't have any forbidden licenses in our images.
run_go_tool ./vendor/github.com/knative/test-infra/tools/dep-collector dep-collector -check $@
}
# Run the given linter on the given files, checking it exists first.
# Parameters: $1 - tool
# $2 - tool purpose (for error message if tool not installed)
# $3 - tool parameters (quote if multiple parameters used)
# $4..$n - files to run linter on
function run_lint_tool() {
local checker=$1
local params=$3
if ! hash ${checker} 2>/dev/null; then
warning "${checker} not installed, not $2"
return 127
fi
shift 3
local failed=0
for file in $@; do
${checker} ${params} ${file} || failed=1
done
return ${failed}
}
# Check links in the given markdown files.
# Parameters: $1...$n - files to inspect
function check_links_in_markdown() {
# https://github.com/raviqqe/liche
local config="${REPO_ROOT_DIR}/test/markdown-link-check-config.rc"
[[ ! -e ${config} ]] && config="${_TEST_INFRA_SCRIPTS_DIR}/markdown-link-check-config.rc"
local options="$(grep '^-' ${config} | tr \"\n\" ' ')"
run_lint_tool liche "checking links in markdown files" "-d ${REPO_ROOT_DIR} ${options}" $@
}
# Check format of the given markdown files.
# Parameters: $1..$n - files to inspect
function lint_markdown() {
# https://github.com/markdownlint/markdownlint
local config="${REPO_ROOT_DIR}/test/markdown-lint-config.rc"
[[ ! -e ${config} ]] && config="${_TEST_INFRA_SCRIPTS_DIR}/markdown-lint-config.rc"
run_lint_tool mdl "linting markdown files" "-c ${config}" $@
}
# Return whether the given parameter is an integer.
# Parameters: $1 - integer to check
function is_int() {
[[ -n $1 && $1 =~ ^[0-9]+$ ]]
}
# Return whether the given parameter is the knative release/nightly GCR.
# Parameters: $1 - full GCR name, e.g. gcr.io/knative-foo-bar
function is_protected_gcr() {
[[ -n $1 && "$1" =~ "^gcr.io/knative-(releases|nightly)/?$" ]]
}
# Remove symlinks in a path that are broken or lead outside the repo.
# Parameters: $1 - path name, e.g. vendor
function remove_broken_symlinks() {
for link in $(find $1 -type l); do
# Remove broken symlinks
if [[ ! -e ${link} ]]; then
unlink ${link}
continue
fi
# Get canonical path to target, remove if outside the repo
local target="$(ls -l ${link})"
target="${target##* -> }"
[[ ${target} == /* ]] || target="./${target}"
target="$(cd `dirname ${link}` && cd ${target%/*} && echo $PWD/${target##*/})"
if [[ ${target} != *github.com/knative/* ]]; then
unlink ${link}
continue
fi
done
}
# Return whether the given parameter is knative-tests.
# Parameters: $1 - project name
function is_protected_project() {
[[ -n "$1" && "$1" == "knative-tests" ]]
}
# Returns the canonical path of a filesystem object.
# Parameters: $1 - path to return in canonical form
# $2 - base dir for relative links; optional, defaults to current
function get_canonical_path() {
# We don't use readlink because it's not available on every platform.
local path=$1
local pwd=${2:-.}
[[ ${path} == /* ]] || path="${pwd}/${path}"
echo "$(cd ${path%/*} && echo $PWD/${path##*/})"
}
# Initializations that depend on previous functions.
# These MUST come last.
readonly _TEST_INFRA_SCRIPTS_DIR="$(dirname $(get_canonical_path ${BASH_SOURCE[0]}))"
readonly REPO_NAME_FORMATTED="Knative $(capitalize ${REPO_NAME//-/})"

vendor/github.com/knative/test-infra/scripts/markdown-link-check-config.rc (generated, vendored, new file)

@@ -0,0 +1,5 @@
# For help, see
# https://github.com/raviqqe/liche/blob/master/README.md
# Don't check localhost links
-x "^https?://localhost($|[:/].*)"

vendor/github.com/knative/test-infra/scripts/markdown-lint-config.rc (generated, vendored, new file)

@@ -0,0 +1,5 @@
# For help, see
# https://github.com/markdownlint/markdownlint/blob/master/docs/configuration.md
# Ignore long lines
rules "~MD013"

View File

@ -0,0 +1,331 @@
#!/bin/bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative presubmit test scripts.
# See README.md for instructions on how to use it.
source $(dirname ${BASH_SOURCE})/library.sh
# Custom configuration of presubmit tests
readonly DISABLE_MD_LINTING=${DISABLE_MD_LINTING:-0}
readonly DISABLE_MD_LINK_CHECK=${DISABLE_MD_LINK_CHECK:-0}
readonly PRESUBMIT_TEST_FAIL_FAST=${PRESUBMIT_TEST_FAIL_FAST:-0}
# Extensions or file patterns that don't require presubmit tests.
readonly NO_PRESUBMIT_FILES=(\.png \.gitignore \.gitattributes ^OWNERS ^OWNERS_ALIASES ^AUTHORS)
# Flag if this is a presubmit run or not.
(( IS_PROW )) && [[ -n "${PULL_PULL_SHA}" ]] && IS_PRESUBMIT=1 || IS_PRESUBMIT=0
readonly IS_PRESUBMIT
# List of changed files on presubmit, LF separated.
CHANGED_FILES=""
# Flags that this PR is exempt of presubmit tests.
IS_PRESUBMIT_EXEMPT_PR=0
# Flags that this PR contains only changes to documentation.
IS_DOCUMENTATION_PR=0
# Returns true if PR only contains the given file regexes.
# Parameters: $1 - file regexes, space separated.
function pr_only_contains() {
[[ -z "$(echo "${CHANGED_FILES}" | grep -v "\(${1// /\\|}\)$")" ]]
}
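# Illustrative example (editor's note, not part of the upstream script): with
# CHANGED_FILES listing only "OWNERS" and "README.md", this succeeds:
#   pr_only_contains "\.md ^OWNERS"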
# List changed files in the current PR.
# This is implemented as a function so it can be mocked in unit tests.
function list_changed_files() {
/workspace/githubhelper -list-changed-files
}
# Initialize flags and context for presubmit tests:
# CHANGED_FILES, IS_PRESUBMIT_EXEMPT_PR and IS_DOCUMENTATION_PR.
function initialize_environment() {
CHANGED_FILES=""
IS_PRESUBMIT_EXEMPT_PR=0
IS_DOCUMENTATION_PR=0
(( ! IS_PRESUBMIT )) && return
CHANGED_FILES="$(list_changed_files)"
if [[ -n "${CHANGED_FILES}" ]]; then
echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${CHANGED_FILES}"
local no_presubmit_files="${NO_PRESUBMIT_FILES[*]}"
pr_only_contains "${no_presubmit_files}" && IS_PRESUBMIT_EXEMPT_PR=1
pr_only_contains "\.md ${no_presubmit_files}" && IS_DOCUMENTATION_PR=1
else
header "NO CHANGED FILES REPORTED, ASSUMING IT'S AN ERROR AND RUNNING TESTS ANYWAY"
fi
readonly CHANGED_FILES
readonly IS_DOCUMENTATION_PR
readonly IS_PRESUBMIT_EXEMPT_PR
}
# Display a pass/fail banner for a test group.
# Parameters: $1 - test group name (e.g., build)
# $2 - result (0=passed, 1=failed)
function results_banner() {
local result
[[ $2 -eq 0 ]] && result="PASSED" || result="FAILED"
header "$1 tests ${result}"
}
# Run build tests. If there's no `build_tests` function, run the default
# build test runner.
function run_build_tests() {
(( ! RUN_BUILD_TESTS )) && return 0
header "Running build tests"
local failed=0
# Run pre-build tests, if any
if function_exists pre_build_tests; then
pre_build_tests || failed=1
fi
# Don't run build tests if pre-build tests failed
if (( ! failed )); then
if function_exists build_tests; then
build_tests || failed=1
else
default_build_test_runner || failed=1
fi
fi
# Don't run post-build tests if pre/build tests failed
if (( ! failed )) && function_exists post_build_tests; then
post_build_tests || failed=1
fi
results_banner "Build" ${failed}
return ${failed}
}
# Perform markdown build tests if necessary, unless disabled.
function markdown_build_tests() {
(( DISABLE_MD_LINTING && DISABLE_MD_LINK_CHECK )) && return 0
# Get changed markdown files (ignore /vendor and deleted files)
local mdfiles=""
for file in $(echo "${CHANGED_FILES}" | grep \.md$ | grep -v ^vendor/); do
[[ -f "${file}" ]] && mdfiles="${mdfiles} ${file}"
done
[[ -z "${mdfiles}" ]] && return 0
local failed=0
if (( ! DISABLE_MD_LINTING )); then
subheader "Linting the markdown files"
lint_markdown ${mdfiles} || failed=1
fi
if (( ! DISABLE_MD_LINK_CHECK )); then
subheader "Checking links in the markdown files"
check_links_in_markdown ${mdfiles} || failed=1
fi
return ${failed}
}
# Default build test runner that:
# * checks the markdown files
# * runs `go build` on the entire repo
# * compiles any code guarded by build tags
# * runs `./hack/verify-codegen.sh` (if it exists)
# * checks licenses in all go packages
function default_build_test_runner() {
local failed=0
# Perform markdown build checks first
markdown_build_tests || failed=1
# For documentation PRs, just check the md files
(( IS_DOCUMENTATION_PR )) && return ${failed}
# Skip build test if there is no go code
local go_pkg_dirs="$(go list ./...)"
[[ -z "${go_pkg_dirs}" ]] && return ${failed}
# Ensure all the code builds
subheader "Checking that go code builds"
go build -v ./... || failed=1
# Get all build tags in go code (ignore /vendor)
local tags="$(grep -r '// +build' . \
| grep -v '^./vendor/' | cut -f3 -d' ' | sort | uniq | tr '\n' ' ')"
if [[ -n "${tags}" ]]; then
go test -run=^$ -tags="${tags}" ./... || failed=1
fi
if [[ -f ./hack/verify-codegen.sh ]]; then
subheader "Checking autogenerated code is up-to-date"
./hack/verify-codegen.sh || failed=1
fi
# Check that we don't have any forbidden licenses in our images.
subheader "Checking for forbidden licenses"
check_licenses ${go_pkg_dirs} || failed=1
return ${failed}
}
# Run unit tests. If there's no `unit_tests` function, run the default
# unit test runner.
function run_unit_tests() {
(( ! RUN_UNIT_TESTS )) && return 0
header "Running unit tests"
local failed=0
# Run pre-unit tests, if any
if function_exists pre_unit_tests; then
pre_unit_tests || failed=1
fi
# Don't run unit tests if pre-unit tests failed
if (( ! failed )); then
if function_exists unit_tests; then
unit_tests || failed=1
else
default_unit_test_runner || failed=1
fi
fi
# Don't run post-unit tests if pre/unit tests failed
if (( ! failed )) && function_exists post_unit_tests; then
post_unit_tests || failed=1
fi
results_banner "Unit" ${failed}
return ${failed}
}
# Default unit test runner that runs all go tests in the repo.
function default_unit_test_runner() {
report_go_test ./...
}
# Run integration tests. If there's no `integration_tests` function, run the
# default integration test runner.
function run_integration_tests() {
# Don't run integration tests if not requested OR on documentation PRs
(( ! RUN_INTEGRATION_TESTS )) && return 0
(( IS_DOCUMENTATION_PR )) && return 0
header "Running integration tests"
local failed=0
# Run pre-integration tests, if any
if function_exists pre_integration_tests; then
pre_integration_tests || failed=1
fi
# Don't run integration tests if pre-integration tests failed
if (( ! failed )); then
if function_exists integration_tests; then
integration_tests || failed=1
else
default_integration_test_runner || failed=1
fi
fi
# Don't run post-integration tests if pre/integration tests failed
if (( ! failed )) && function_exists post_integration_tests; then
post_integration_tests || failed=1
fi
results_banner "Integration" ${failed}
return ${failed}
}
# Default integration test runner that runs all `test/e2e-*tests.sh`.
function default_integration_test_runner() {
local options=""
local failed=0
(( EMIT_METRICS )) && options="--emit-metrics"
for e2e_test in $(find test/ -name e2e-*tests.sh); do
echo "Running integration test ${e2e_test}"
if ! ${e2e_test} ${options}; then
failed=1
fi
done
return ${failed}
}
# Options set by command-line flags.
RUN_BUILD_TESTS=0
RUN_UNIT_TESTS=0
RUN_INTEGRATION_TESTS=0
EMIT_METRICS=0
# Process flags and run tests accordingly.
function main() {
initialize_environment
if (( IS_PRESUBMIT_EXEMPT_PR )) && (( ! IS_DOCUMENTATION_PR )); then
header "Commit only contains changes that don't require tests, skipping"
exit 0
fi
# Show the version of the tools we're using
if (( IS_PROW )); then
# Disable gcloud update notifications
gcloud config set component_manager/disable_update_check true
header "Current test setup"
echo ">> gcloud SDK version"
gcloud version
echo ">> kubectl version"
kubectl version --client
echo ">> go version"
go version
echo ">> git version"
git version
echo ">> bazel version"
bazel version 2> /dev/null
if [[ "${DOCKER_IN_DOCKER_ENABLED}" == "true" ]]; then
echo ">> docker version"
docker version
fi
fi
[[ -z $1 ]] && set -- "--all-tests"
local TEST_TO_RUN=""
while [[ $# -ne 0 ]]; do
local parameter=$1
case ${parameter} in
--build-tests) RUN_BUILD_TESTS=1 ;;
--unit-tests) RUN_UNIT_TESTS=1 ;;
--integration-tests) RUN_INTEGRATION_TESTS=1 ;;
--emit-metrics) EMIT_METRICS=1 ;;
--all-tests)
RUN_BUILD_TESTS=1
RUN_UNIT_TESTS=1
RUN_INTEGRATION_TESTS=1
;;
--run-test)
shift
[[ $# -ge 1 ]] || abort "missing executable after --run-test"
TEST_TO_RUN=$1
;;
*) abort "error: unknown option ${parameter}" ;;
esac
shift
done
readonly RUN_BUILD_TESTS
readonly RUN_UNIT_TESTS
readonly RUN_INTEGRATION_TESTS
readonly EMIT_METRICS
readonly TEST_TO_RUN
cd ${REPO_ROOT_DIR}
# Tests to be performed, in the right order if --all-tests is passed.
local failed=0
if [[ -n "${TEST_TO_RUN}" ]]; then
if (( RUN_BUILD_TESTS || RUN_UNIT_TESTS || RUN_INTEGRATION_TESTS )); then
abort "--run-test must be used alone"
fi
# If this is a presubmit run, but a documentation-only PR, don't run the test
(( IS_PRESUBMIT && IS_DOCUMENTATION_PR )) && exit 0
${TEST_TO_RUN} || failed=1
fi
run_build_tests || failed=1
# If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run unit tests if build tests failed
if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
run_unit_tests || failed=1
fi
# If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run integration tests if build/unit tests failed
if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
run_integration_tests || failed=1
fi
exit ${failed}
}
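# Illustrative usage (editor's sketch, not part of the upstream script): a repository
# normally provides its own test/presubmit-tests.sh that sources this helper, optionally
# overrides the build_tests/unit_tests/integration_tests hooks, and delegates to main().
# The hook bodies below are hypothetical examples:
#
#   #!/usr/bin/env bash
#   source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
#
#   function build_tests() {
#     go build -v ./... && check_licenses $(go list ./...)
#   }
#
#   function unit_tests() {
#     report_go_test ./...
#   }
#
#   function integration_tests() {
#     ./test/e2e-tests.sh
#   }
#
#   main $@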

554
vendor/github.com/knative/test-infra/scripts/release.sh generated vendored Executable file
View File

@ -0,0 +1,554 @@
#!/bin/bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative release scripts.
# See README.md for instructions on how to use it.
source $(dirname ${BASH_SOURCE})/library.sh
# GitHub upstream.
readonly KNATIVE_UPSTREAM="https://github.com/knative/${REPO_NAME}"
# GCRs for Knative releases.
readonly NIGHTLY_GCR="gcr.io/knative-nightly/github.com/knative/${REPO_NAME}"
readonly RELEASE_GCR="gcr.io/knative-releases/github.com/knative/${REPO_NAME}"
# Georeplicate images to {us,eu,asia}.gcr.io
readonly GEO_REPLICATION=(us eu asia)
# Simple banner for logging purposes.
# Parameters: $1 - message to display.
function banner() {
make_banner "@" "$1"
}
# Tag images in the yaml files if $TAG is not empty.
# $KO_DOCKER_REPO is the registry containing the images to tag with $TAG.
# Parameters: $1..$n - yaml files to parse for images.
function tag_images_in_yamls() {
[[ -z ${TAG} ]] && return 0
local SRC_DIR="${GOPATH}/src/"
local DOCKER_BASE="${KO_DOCKER_REPO}/${REPO_ROOT_DIR/$SRC_DIR}"
local GEO_REGIONS="${GEO_REPLICATION[@]} "
echo "Tagging images under '${DOCKER_BASE}' with ${TAG}"
for file in $@; do
echo "Inspecting ${file}"
for image in $(grep -o "${DOCKER_BASE}/[a-z\./-]\+@sha256:[0-9a-f]\+" ${file}); do
for region in "" ${GEO_REGIONS// /. }; do
gcloud -q container images add-tag ${image} ${region}${image%%@*}:${TAG}
done
done
done
}
# Copy the given yaml files to the $RELEASE_GCS_BUCKET bucket's "latest" directory.
# If $TAG is not empty, also copy them to $RELEASE_GCS_BUCKET bucket's "previous" directory.
# Parameters: $1..$n - yaml files to copy.
function publish_yamls() {
function verbose_gsutil_cp {
local DEST="gs://${RELEASE_GCS_BUCKET}/$1/"
shift
echo "Publishing [$@] to ${DEST}"
gsutil -m cp $@ ${DEST}
}
# Before publishing the YAML files, cleanup the `latest` dir if it exists.
local latest_dir="gs://${RELEASE_GCS_BUCKET}/latest"
if [[ -n "$(gsutil ls ${latest_dir} 2> /dev/null)" ]]; then
echo "Cleaning up '${latest_dir}' first"
gsutil -m rm ${latest_dir}/**
fi
verbose_gsutil_cp latest $@
[[ -n ${TAG} ]] && verbose_gsutil_cp previous/${TAG} $@
}
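# Illustrative example (editor's note, not part of the upstream script); assumes
# RELEASE_GCS_BUCKET is set and the YAML file names are hypothetical:
#   publish_yamls release.yaml release-no-mon.yaml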
# These are global environment variables.
SKIP_TESTS=0
PRESUBMIT_TEST_FAIL_FAST=1
TAG_RELEASE=0
PUBLISH_RELEASE=0
PUBLISH_TO_GITHUB=0
TAG=""
RELEASE_VERSION=""
RELEASE_NOTES=""
RELEASE_BRANCH=""
RELEASE_GCS_BUCKET=""
KO_FLAGS=""
VALIDATION_TESTS="./test/presubmit-tests.sh"
YAMLS_TO_PUBLISH=""
FROM_NIGHTLY_RELEASE=""
FROM_NIGHTLY_RELEASE_GCS=""
export KO_DOCKER_REPO=""
export GITHUB_TOKEN=""
# Convenience function to run the hub tool.
# Parameters: $1..$n - arguments to hub.
function hub_tool() {
run_go_tool github.com/github/hub hub $@
}
# Shortcut to "git push" that handles authentication.
# Parameters: $1..$n - arguments to "git push <repo>".
function git_push() {
local repo_url="${KNATIVE_UPSTREAM}"
[[ -n "${GITHUB_TOKEN}}" ]] && repo_url="${repo_url/:\/\//:\/\/${GITHUB_TOKEN}@}"
git push ${repo_url} $@
}
# Return the master version of a release.
# For example, "v0.2.1" returns "0.2"
# Parameters: $1 - release version label.
function master_version() {
local release="${1//v/}"
local tokens=(${release//\./ })
echo "${tokens[0]}.${tokens[1]}"
}
# Return the release build number of a release.
# For example, "v0.2.1" returns "1".
# Parameters: $1 - release version label.
function release_build_number() {
local tokens=(${1//\./ })
echo "${tokens[2]}"
}
# Return the short commit SHA from a release tag.
# For example, "v20010101-deadbeef" returns "deadbeef".
function hash_from_tag() {
local tokens=(${1//-/ })
echo "${tokens[1]}"
}
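# Illustrative examples (editor's note, not part of the upstream script):
#   master_version "v0.2.1"              # prints "0.2"
#   release_build_number "v0.2.1"        # prints "1"
#   hash_from_tag "v20010101-deadbeef"   # prints "deadbeef"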
# Setup the repository upstream, if not set.
function setup_upstream() {
# hub and checkout need the upstream URL to be set
# TODO(adrcunha): Use "git remote get-url" once available on Prow.
local upstream="$(git config --get remote.upstream.url)"
echo "Remote upstream URL is '${upstream}'"
if [[ -z "${upstream}" ]]; then
echo "Setting remote upstream URL to '${KNATIVE_UPSTREAM}'"
git remote add upstream ${KNATIVE_UPSTREAM}
fi
}
# Fetch the release branch, so we can check it out.
function setup_branch() {
[[ -z "${RELEASE_BRANCH}" ]] && return
git fetch ${KNATIVE_UPSTREAM} ${RELEASE_BRANCH}:upstream/${RELEASE_BRANCH}
}
# Setup version, branch and release notes for an auto release.
function prepare_auto_release() {
echo "Auto release requested"
TAG_RELEASE=1
PUBLISH_RELEASE=1
git fetch --all
local tags="$(git tag | cut -d 'v' -f2 | cut -d '.' -f1-2 | sort | uniq)"
local branches="$( { (git branch -r | grep upstream/release-) ; (git branch | grep release-); } | cut -d '-' -f2 | sort | uniq)"
RELEASE_VERSION=""
[[ -n "${tags}" ]] || abort "cannot obtain release tags for the repository"
[[ -n "${branches}" ]] || abort "cannot obtain release branches for the repository"
for i in $branches; do
RELEASE_NUMBER=$i
for j in $tags; do
if [[ "$i" == "$j" ]]; then
RELEASE_NUMBER=""
fi
done
done
if [ -z "$RELEASE_NUMBER" ]; then
echo "*** No new release will be generated, as no new branches exist"
exit 0
fi
RELEASE_VERSION="${RELEASE_NUMBER}.0"
RELEASE_BRANCH="release-${RELEASE_NUMBER}"
echo "Will create release ${RELEASE_VERSION} from branch ${RELEASE_BRANCH}"
# If --release-notes not used, add a placeholder
if [[ -z "${RELEASE_NOTES}" ]]; then
RELEASE_NOTES="$(mktemp)"
echo "[add release notes here]" > ${RELEASE_NOTES}
fi
}
# Setup version, branch and release notes for a "dot" release.
function prepare_dot_release() {
echo "Dot release requested"
TAG_RELEASE=1
PUBLISH_RELEASE=1
# List latest release
local releases # don't combine with the line below, or $? will be 0
releases="$(hub_tool release)"
[[ $? -eq 0 ]] || abort "cannot list releases"
# If --release-branch passed, restrict to that release
if [[ -n "${RELEASE_BRANCH}" ]]; then
local version_filter="v${RELEASE_BRANCH##release-}"
echo "Dot release will be generated for ${version_filter}"
releases="$(echo "${releases}" | grep ^${version_filter})"
fi
local last_version="$(echo "${releases}" | grep '^v[0-9]\+\.[0-9]\+\.[0-9]\+$' | sort -r | head -1)"
[[ -n "${last_version}" ]] || abort "no previous release exist"
if [[ -z "${RELEASE_BRANCH}" ]]; then
echo "Last release is ${last_version}"
# Determine branch
local major_minor_version="$(master_version ${last_version})"
RELEASE_BRANCH="release-${major_minor_version}"
echo "Last release branch is ${RELEASE_BRANCH}"
fi
# Ensure there are new commits in the branch, otherwise we don't create a new release
setup_branch
local last_release_commit="$(git rev-list -n 1 ${last_version})"
local release_branch_commit="$(git rev-list -n 1 upstream/${RELEASE_BRANCH})"
[[ -n "${last_release_commit}" ]] || abort "cannot get last release commit"
[[ -n "${release_branch_commit}" ]] || abort "cannot get release branch last commit"
if [[ "${last_release_commit}" == "${release_branch_commit}" ]]; then
echo "*** Branch ${RELEASE_BRANCH} is at commit ${release_branch_commit}"
echo "*** Branch ${RELEASE_BRANCH} has no new cherry-picks since release ${last_version}"
echo "*** No dot release will be generated, as no changes exist"
exit 0
fi
# Create new release version number
local last_build="$(release_build_number ${last_version})"
RELEASE_VERSION="${major_minor_version}.$(( last_build + 1 ))"
echo "Will create release ${RELEASE_VERSION} at commit ${release_branch_commit}"
# If --release-notes not used, copy from the latest release
if [[ -z "${RELEASE_NOTES}" ]]; then
RELEASE_NOTES="$(mktemp)"
hub_tool release show -f "%b" ${last_version} > ${RELEASE_NOTES}
echo "Release notes from ${last_version} copied to ${RELEASE_NOTES}"
fi
}
# Setup source nightly image for a release.
function prepare_from_nightly_release() {
echo "Release from nightly requested"
SKIP_TESTS=1
if [[ "${FROM_NIGHTLY_RELEASE}" == "latest" ]]; then
echo "Finding the latest nightly release"
find_latest_nightly "${NIGHTLY_GCR}" || abort "cannot find the latest nightly release"
echo "Latest nightly is ${FROM_NIGHTLY_RELEASE}"
fi
readonly FROM_NIGHTLY_RELEASE_GCS="gs://knative-nightly/${REPO_NAME}/previous/${FROM_NIGHTLY_RELEASE}"
gsutil ls -d "${FROM_NIGHTLY_RELEASE_GCS}" > /dev/null \
|| abort "nightly release ${FROM_NIGHTLY_RELEASE} doesn't exist"
}
# Build a release from an existing nightly one.
function build_from_nightly_release() {
banner "Building the release"
echo "Fetching manifests from nightly"
local yamls_dir="$(mktemp -d)"
gsutil -m cp -r "${FROM_NIGHTLY_RELEASE_GCS}/*" "${yamls_dir}" || abort "error fetching manifests"
# Update references to release GCR
for yaml in ${yamls_dir}/*.yaml; do
sed -i -e "s#${NIGHTLY_GCR}#${RELEASE_GCR}#" "${yaml}"
done
YAMLS_TO_PUBLISH="$(find ${yamls_dir} -name '*.yaml' -printf '%p ')"
echo "Copying nightly images"
copy_nightly_images_to_release_gcr "${NIGHTLY_GCR}" "${FROM_NIGHTLY_RELEASE}"
# Create a release branch from the nightly release tag.
local commit="$(hash_from_tag ${FROM_NIGHTLY_RELEASE})"
echo "Creating release branch ${RELEASE_BRANCH} at commit ${commit}"
git checkout -b ${RELEASE_BRANCH} ${commit} || abort "cannot create branch"
git_push upstream ${RELEASE_BRANCH} || abort "cannot push branch"
}
# Build a release from source.
function build_from_source() {
run_validation_tests ${VALIDATION_TESTS}
banner "Building the release"
build_release
# Do not use `||` above or any error will be swallowed.
if [[ $? -ne 0 ]]; then
abort "error building the release"
fi
}
# Copy tagged images from the nightly GCR to the release GCR, tagging them 'latest'.
# This is a recursive function, first call must pass $NIGHTLY_GCR as first parameter.
# Parameters: $1 - GCR to recurse into.
# $2 - tag to be used to select images to copy.
function copy_nightly_images_to_release_gcr() {
for entry in $(gcloud --format="value(name)" container images list --repository="$1"); do
copy_nightly_images_to_release_gcr "${entry}" "$2"
# Copy each image with the given nightly tag
for x in $(gcloud --format="value(tags)" container images list-tags "${entry}" --filter="tags=$2" --limit=1); do
local path="${entry/${NIGHTLY_GCR}}" # Image "path" (remove GCR part)
local dst="${RELEASE_GCR}${path}:latest"
gcloud container images add-tag "${entry}:$2" "${dst}" || abort "error copying image"
done
done
}
# Recurse into GCR and find the nightly tag of the first `latest` image found.
# Parameters: $1 - GCR to recurse into.
function find_latest_nightly() {
for entry in $(gcloud --format="value(name)" container images list --repository="$1"); do
find_latest_nightly "${entry}" && return 0
for tag in $(gcloud --format="value(tags)" container images list-tags "${entry}" \
--filter="tags=latest" --limit=1); do
local tags=( ${tag//,/ } )
# Skip if there is more than one nightly tag, as we can't tell which one is the latest.
if [[ ${#tags[@]} -eq 2 ]]; then
local nightly_tag="${tags[@]/latest}" # Remove 'latest' tag
FROM_NIGHTLY_RELEASE="${nightly_tag// /}" # Remove spaces
return 0
fi
done
done
return 1
}
# Parses flags and sets environment variables accordingly.
function parse_flags() {
TAG=""
RELEASE_VERSION=""
RELEASE_NOTES=""
RELEASE_BRANCH=""
KO_FLAGS="-P"
KO_DOCKER_REPO="gcr.io/knative-nightly"
RELEASE_GCS_BUCKET="knative-nightly/${REPO_NAME}"
GITHUB_TOKEN=""
FROM_NIGHTLY_RELEASE=""
local has_gcr_flag=0
local has_gcs_flag=0
local is_dot_release=0
local is_auto_release=0
cd ${REPO_ROOT_DIR}
while [[ $# -ne 0 ]]; do
local parameter=$1
case ${parameter} in
--skip-tests) SKIP_TESTS=1 ;;
--tag-release) TAG_RELEASE=1 ;;
--notag-release) TAG_RELEASE=0 ;;
--publish) PUBLISH_RELEASE=1 ;;
--nopublish) PUBLISH_RELEASE=0 ;;
--dot-release) is_dot_release=1 ;;
--auto-release) is_auto_release=1 ;;
--from-latest-nightly) FROM_NIGHTLY_RELEASE=latest ;;
*)
[[ $# -ge 2 ]] || abort "missing parameter after $1"
shift
case ${parameter} in
--github-token)
[[ ! -f "$1" ]] && abort "file $1 doesn't exist"
# Remove any trailing newline/space from token
GITHUB_TOKEN="$(echo -n $(cat $1))"
[[ -n "${GITHUB_TOKEN}" ]] || abort "file $1 is empty"
;;
--release-gcr)
KO_DOCKER_REPO=$1
has_gcr_flag=1
;;
--release-gcs)
RELEASE_GCS_BUCKET=$1
has_gcs_flag=1
;;
--version)
[[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be '[0-9].[0-9].[0-9]'"
RELEASE_VERSION=$1
;;
--branch)
[[ $1 =~ ^release-[0-9]+\.[0-9]+$ ]] || abort "branch name must be 'release-[0-9].[0-9]'"
RELEASE_BRANCH=$1
;;
--release-notes)
[[ ! -f "$1" ]] && abort "file $1 doesn't exist"
RELEASE_NOTES=$1
;;
--from-nightly)
[[ $1 =~ ^v[0-9]+-[0-9a-f]+$ ]] || abort "nightly tag must be 'vYYYYMMDD-commithash'"
FROM_NIGHTLY_RELEASE=$1
;;
*) abort "unknown option ${parameter}" ;;
esac
esac
shift
done
# Do auto release unless release is forced
if (( is_auto_release )); then
(( is_dot_release )) && abort "cannot have both --dot-release and --auto-release set simultaneously"
[[ -n "${RELEASE_VERSION}" ]] && abort "cannot have both --version and --auto-release set simultaneously"
[[ -n "${RELEASE_BRANCH}" ]] && abort "cannot have both --branch and --auto-release set simultaneously"
[[ -n "${FROM_NIGHTLY_RELEASE}" ]] && abort "cannot have --auto-release with a nightly source"
setup_upstream
prepare_auto_release
fi
# Setup source nightly image
if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
(( is_dot_release )) && abort "dot releases are built from source"
[[ -z "${RELEASE_VERSION}" ]] && abort "release version must be specified with --version"
# TODO(adrcunha): "dot" releases from release branches require releasing nightlies
# for such branches, which we don't do yet.
[[ "${RELEASE_VERSION}" =~ ^[0-9]+\.[0-9]+\.0$ ]] || abort "version format must be 'X.Y.0'"
RELEASE_BRANCH="release-$(master_version ${RELEASE_VERSION})"
prepare_from_nightly_release
setup_upstream
fi
# Setup dot releases
if (( is_dot_release )); then
setup_upstream
prepare_dot_release
fi
# Update KO_DOCKER_REPO and KO_FLAGS if we're not publishing.
if (( ! PUBLISH_RELEASE )); then
(( has_gcr_flag )) && echo "Not publishing the release, GCR flag is ignored"
(( has_gcs_flag )) && echo "Not publishing the release, GCS flag is ignored"
KO_DOCKER_REPO="ko.local"
KO_FLAGS="-L ${KO_FLAGS}"
RELEASE_GCS_BUCKET=""
fi
if (( TAG_RELEASE )); then
# Get the commit, excluding any tags but keeping the "dirty" flag
local commit="$(git describe --always --dirty --match '^$')"
[[ -n "${commit}" ]] || abort "error getting the current commit"
# Like kubernetes, image tag is vYYYYMMDD-commit
TAG="v$(date +%Y%m%d)-${commit}"
fi
if [[ -n "${RELEASE_VERSION}" ]]; then
TAG="v${RELEASE_VERSION}"
fi
[[ -n "${RELEASE_VERSION}" && -n "${RELEASE_BRANCH}" ]] && (( PUBLISH_RELEASE )) && PUBLISH_TO_GITHUB=1
readonly SKIP_TESTS
readonly TAG_RELEASE
readonly PUBLISH_RELEASE
readonly PUBLISH_TO_GITHUB
readonly TAG
readonly RELEASE_VERSION
readonly RELEASE_NOTES
readonly RELEASE_BRANCH
readonly RELEASE_GCS_BUCKET
readonly KO_DOCKER_REPO
readonly VALIDATION_TESTS
readonly FROM_NIGHTLY_RELEASE
}
# Run tests (unless --skip-tests was passed). Conveniently displays a banner indicating so.
# Parameters: $1 - executable that runs the tests.
function run_validation_tests() {
if (( ! SKIP_TESTS )); then
banner "Running release validation tests"
# Run tests.
if ! $1; then
banner "Release validation tests failed, aborting"
exit 1
fi
fi
}
# Entry point for a release script.
function main() {
function_exists build_release || abort "function 'build_release()' not defined"
[[ -x ${VALIDATION_TESTS} ]] || abort "test script '${VALIDATION_TESTS}' doesn't exist"
parse_flags $@
# Log what will be done and where.
banner "Release configuration"
echo "- gcloud user: $(gcloud config get-value core/account)"
echo "- Go path: ${GOPATH}"
echo "- Repository root: ${REPO_ROOT_DIR}"
echo "- Destination GCR: ${KO_DOCKER_REPO}"
(( SKIP_TESTS )) && echo "- Tests will NOT be run" || echo "- Tests will be run"
if (( TAG_RELEASE )); then
echo "- Artifacts will be tagged '${TAG}'"
else
echo "- Artifacts WILL NOT be tagged"
fi
if (( PUBLISH_RELEASE )); then
echo "- Release WILL BE published to '${RELEASE_GCS_BUCKET}'"
else
echo "- Release will not be published"
fi
if (( PUBLISH_TO_GITHUB )); then
echo "- Release WILL BE published to GitHub"
fi
if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
echo "- Release will be A COPY OF '${FROM_NIGHTLY_RELEASE}' nightly"
else
echo "- Release will be BUILT FROM SOURCE"
[[ -n "${RELEASE_BRANCH}" ]] && echo "- Release will be built from branch '${RELEASE_BRANCH}'"
fi
[[ -n "${RELEASE_NOTES}" ]] && echo "- Release notes are generated from '${RELEASE_NOTES}'"
# Checkout specific branch, if necessary
if [[ -n "${RELEASE_BRANCH}" && -z "${FROM_NIGHTLY_RELEASE}" ]]; then
setup_upstream
setup_branch
git checkout upstream/${RELEASE_BRANCH} || abort "cannot checkout branch ${RELEASE_BRANCH}"
fi
set -o errexit
set -o pipefail
if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
build_from_nightly_release
else
build_from_source
fi
[[ -z "${YAMLS_TO_PUBLISH}" ]] && abort "no manifests were generated"
# Ensure no empty YAML file will be published.
for yaml in ${YAMLS_TO_PUBLISH}; do
[[ -s ${yaml} ]] || abort "YAML file ${yaml} is empty"
done
echo "New release built successfully"
if (( PUBLISH_RELEASE )); then
tag_images_in_yamls ${YAMLS_TO_PUBLISH}
publish_yamls ${YAMLS_TO_PUBLISH}
publish_to_github ${YAMLS_TO_PUBLISH}
banner "New release published successfully"
fi
}
# Publishes a new release on GitHub, also tagging it in git (only for versioned releases).
# Parameters: $1..$n - YAML files to add to the release.
function publish_to_github() {
(( PUBLISH_TO_GITHUB )) || return 0
local title="${REPO_NAME_FORMATTED} release ${TAG}"
local attachments=()
local description="$(mktemp)"
local attachments_dir="$(mktemp -d)"
local commitish=""
# Copy each YAML to a separate dir
for yaml in $@; do
cp ${yaml} ${attachments_dir}/
attachments+=("--attach=${yaml}#$(basename ${yaml})")
done
echo -e "${title}\n" > ${description}
if [[ -n "${RELEASE_NOTES}" ]]; then
cat ${RELEASE_NOTES} >> ${description}
fi
git tag -a ${TAG} -m "${title}"
git_push tag ${TAG}
[[ -n "${RELEASE_BRANCH}" ]] && commitish="--commitish=${RELEASE_BRANCH}"
hub_tool release create \
--prerelease \
${attachments[@]} \
--file=${description} \
${commitish} \
${TAG}
}
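# Illustrative usage (editor's sketch, not part of the upstream script): a repository's
# release script would source this helper, define build_release() to populate
# YAMLS_TO_PUBLISH, and delegate to main(). The ko invocation and file names below
# are hypothetical:
#
#   #!/usr/bin/env bash
#   source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/release.sh
#
#   function build_release() {
#     # Build the images and render the manifest with ko.
#     ko resolve ${KO_FLAGS} -f config/ > release.yaml || return 1
#     YAMLS_TO_PUBLISH="$(pwd)/release.yaml"
#   }
#
#   main $@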