code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
# Copyright: 2015 Masatake YAMATO
# License: GPL-2
. ../utils.sh
# Skip this Tmain test unless ctags lists the (deliberately bogus-looking)
# feature name; when it does, emit the blank line the expected output records.
if ! ${CTAGS} --quiet --options=NONE --list-features | grep -q afasdfasfasfsa; then
skip "example: no such feature"
else
echo
fi
|
masatake/ctags
|
Tmain/tmain-skip-example.d/run.sh
|
Shell
|
gpl-2.0
| 202 |
#!/bin/bash
# This program should compile paper.tex, a tex file assumed to be in
# the current/working directory. And it should tar-gzip the entire
# current directory (from the current directory) to a tgz file called
# paper.tgz. It should then move the paper.tgz file to /tmp.
#
# NOTE: the original first line read "#/bin/bash" — a plain comment, not a
# shebang (missing "!") — so the bash-only ">&" redirection could have been
# parsed by a non-bash /bin/sh. The "#!" above fixes that.
#compile
latex -interaction=batchmode paper.tex >&/dev/null
#tar-gzip the whole working directory
tar -cvzf paper.tgz . >&/dev/null
#move to /tmp
mv paper.tgz /tmp/
|
sm88/cs699
|
bash/work/forupload/Q1-simple/tex2pdf.sh
|
Shell
|
gpl-2.0
| 412 |
#!/bin/bash
# DMTCP settings
# Prepend a locally-built DMTCP sandbox to the executable and shared-library
# search paths so the test suite picks up that build first.
DMTCP_PATH="/home/research/artpol/sandbox"
export PATH="${DMTCP_PATH}/bin/:${PATH}"
export LD_LIBRARY_PATH="${DMTCP_PATH}/lib:${LD_LIBRARY_PATH}"
|
jiajuncao/dmtcp
|
plugin/batch-queue/test-suite/dmtcp_env.sh
|
Shell
|
lgpl-3.0
| 168 |
#!/usr/bin/env bash
# Copyright 2018 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script is invoked by Jenkins and runs a diff on the microbenchmarks
set -ex
# avoid slow finalization after the script has exited.
# Quote the $(dirname "$0") expansions: unquoted they word-split if the
# checkout path contains spaces.
source "$(dirname "$0")/../../../tools/internal_ci/helper_scripts/move_src_tree_and_respawn_itself_rc"
# Enter the gRPC repo root
cd "$(dirname "$0")/../../.."
export PREPARE_BUILD_INSTALL_DEPS_OBJC=true
source tools/internal_ci/helper_scripts/prepare_build_macos_rc
# Compare iOS binary size against the pull request's target branch.
tools/profiling/ios_bin/binary_size.py \
-d "origin/$KOKORO_GITHUB_PULL_REQUEST_TARGET_BRANCH"
|
jtattermusch/grpc
|
tools/internal_ci/macos/grpc_ios_binary_size.sh
|
Shell
|
apache-2.0
| 1,195 |
#!/bin/sh
# __BEGIN_LICENSE__
# Copyright (c) 2006-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NASA Vision Workbench is licensed under the Apache License,
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
#set -x
# Return 0 (true) iff $1 begins with the glob prefix pattern $2.
begins_with() {
# Stripping $2 from the front only changes the string when it matched,
# so a difference means "begins with".
test "x${1#$2}" != "x$1"
}
# Return 0 (true) iff $1 ends with the glob suffix pattern $2.
ends_with() {
# Stripping $2 from the end only changes the string when it matched,
# so a difference means "ends with".
test "x${1%$2}" != "x$1"
}
# --- argument validation ---------------------------------------------------
if test "$#" -lt "1"; then
echo "Usage: $0 <test-suite-name> [<module-name>]"
echo "module-name is inferred from pwd if possible"
exit 1
fi
test_name="$1"
wd="`pwd`"
# Absolute path of the source-tree root (one level above this script's dir).
srcdir="$(dirname $(cd ${0%/*}/../ && echo $PWD/${0##*/}))"
# Path of the cwd relative to the source root ("" when outside the tree).
rel="${wd##$srcdir}"
if test -z "$2"; then
# No module given: infer it from where we were invoked.
#   .../<module>/tests   -> create the test right here
#   /src/vw/<module>/... -> create it under that module's tests/ dir
if ends_with $rel /tests; then
module_name="${rel%/tests}"
module_name="${module_name##*/}"
test_path="$wd"
elif begins_with $rel /src/vw/; then
module_name="${rel#/src/vw/}"
module_name="${module_name%%/*}"
test_path="$srcdir/src/vw/$module_name/tests"
else
echo "Couldn't infer module name from current directory"
echo "$wd"
exit 1
fi
else
# Explicit module name supplied on the command line.
module_name="$2"
test_path="$srcdir/src/vw/$module_name/tests"
fi
if test -d "$test_path"; then :; else
echo "Directory $test_path does not exist. Please create it first."
exit 1
fi
full_test="$test_path/Test${test_name}.h"
# Never clobber an existing test suite file.
if test -e "$full_test"; then
echo "Refusing to overwrite test"
echo "$full_test"
exit 1
fi
echo "creating ${full_test}"
# Emit a CxxTest skeleton. NOTE(review): the second assertion (1 + 2 == 2)
# fails as written — presumably a placeholder intended to be replaced with
# real tests; confirm before "fixing" it.
cat <<EOF > ${full_test}
#include <cxxtest/TestSuite.h>
#include <vw/${module_name}.h>
class ${test_name} : public CxxTest::TestSuite
{public:
void testOne()
{
TS_ASSERT_EQUALS( 1 + 1, 2 );
TS_ASSERT_EQUALS( 1 + 2, 2 );
}
};
EOF
# rel = */x/tests -> module[x], create test in pwd (since src/vw/tests matches)
# rel = src/vw/x -> module[x], create test in $srcdir/src/vw/x/tests
# else module-name is required
|
DougFirErickson/visionworkbench
|
scripts/create-test.sh
|
Shell
|
apache-2.0
| 2,574 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This contains util code for testing kubectl.
set -o errexit
set -o nounset
set -o pipefail
# Set locale to ensure english responses from kubectl commands
export LANG=C
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
# Expects the following has already been done by whatever sources this script
# source "${KUBE_ROOT}/hack/lib/init.sh"
# source "${KUBE_ROOT}/hack/lib/test.sh"
# Control-plane endpoints; each can be overridden from the environment.
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}
API_PORT=${API_PORT:-8080}
SECURE_API_PORT=${SECURE_API_PORT:-6443}
API_HOST=${API_HOST:-127.0.0.1}
KUBELET_HEALTHZ_PORT=${KUBELET_HEALTHZ_PORT:-10248}
CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
# Container images referenced by the test fixtures below.
IMAGE_NGINX="gcr.io/google-containers/nginx:1.7.9"
IMAGE_DEPLOYMENT_R1="gcr.io/google-containers/nginx:test-cmd" # deployment-revision1.yaml
IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
IMAGE_PERL="gcr.io/google-containers/perl"
IMAGE_PAUSE_V2="gcr.io/google-containers/pause:2.0"
IMAGE_DAEMONSET_R2="gcr.io/google-containers/pause:latest"
IMAGE_DAEMONSET_R2_2="gcr.io/google-containers/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
IMAGE_STATEFULSET_R1="gcr.io/google_containers/nginx-slim:0.7"
IMAGE_STATEFULSET_R2="gcr.io/google_containers/nginx-slim:0.8"
# Expose kubectl directly for readability
PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
# Define variables for resource types to prevent typos.
clusterroles="clusterroles"
configmaps="configmaps"
csr="csr"
deployments="deployments"
horizontalpodautoscalers="horizontalpodautoscalers"
metrics="metrics"
namespaces="namespaces"
nodes="nodes"
persistentvolumeclaims="persistentvolumeclaims"
persistentvolumes="persistentvolumes"
pods="pods"
podtemplates="podtemplates"
replicasets="replicasets"
replicationcontrollers="replicationcontrollers"
roles="roles"
secrets="secrets"
serviceaccounts="serviceaccounts"
services="services"
statefulsets="statefulsets"
static="static"
storageclass="storageclass"
subjectaccessreviews="subjectaccessreviews"
selfsubjectaccessreviews="selfsubjectaccessreviews"
thirdpartyresources="thirdpartyresources"
customresourcedefinitions="customresourcedefinitions"
daemonsets="daemonsets"
controllerrevisions="controllerrevisions"
# include shell2junit library (provides juLog, used by record_command)
sh2ju="${KUBE_ROOT}/third_party/forked/shell2junit/sh2ju.sh"
if [[ -f "${sh2ju}" ]]; then
source "${sh2ju}"
else
echo "failed to find third_party/forked/shell2junit/sh2ju.sh"
exit 1
fi
# record_command runs the command and records its output/error messages in junit format
# it expects the first to be the name of the command
# Example:
# record_command run_kubectl_tests
#
# WARNING: Variable changes in the command will NOT be effective after record_command returns.
# This is because the command runs in subshell.
# record_command runs the command and records its output/error messages in
# junit format via juLog. The first argument is the name of the command.
# NOTE: the command runs in a subshell, so any variable changes it makes are
# lost once record_command returns.
function record_command() {
set +o nounset
set +o errexit
local name="$1"
local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
echo "Recording: ${name}"
echo "Running command: $@"
# On failure, remember it in the global foundError flag for the test driver.
if ! juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"; then
echo "Error when running ${name}"
foundError="True"
fi
set -o nounset
set -o errexit
}
# Stops the running kubectl proxy, if there is one.
# Stops the running kubectl proxy, if there is one, and clears the
# PROXY_PID / PROXY_PORT / PROXY_PORT_FILE bookkeeping globals.
function stop-proxy()
{
if [[ -n "${PROXY_PORT-}" ]]; then
kube::log::status "Stopping proxy on port ${PROXY_PORT}"
fi
if [[ -n "${PROXY_PID-}" ]]; then
kill "${PROXY_PID}" 1>&2 2>/dev/null
fi
if [[ -n "${PROXY_PORT_FILE-}" ]]; then
rm -f ${PROXY_PORT_FILE}
fi
PROXY_PID=
PROXY_PORT=
PROXY_PORT_FILE=
}
# Starts "kubectl proxy" to test the client proxy. $1: api_prefix
function start-proxy()
{
stop-proxy
PROXY_PORT_FILE=$(mktemp proxy-port.out.XXXXX)
kube::log::status "Starting kubectl proxy on random port; output file in ${PROXY_PORT_FILE}; args: ${1-}"
# --port=0 lets the kernel pick a free port; the chosen port is parsed back
# out of the proxy's startup log below.
if [ $# -eq 0 ]; then
kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
else
kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
fi
PROXY_PID=$!
PROXY_PORT=
local attempts=0
# Poll the log file (up to 10 tries, 0.5s apart) for the "Starting to
# serve" line; give up and kill the proxy if it never appears.
while [[ -z ${PROXY_PORT} ]]; do
if (( ${attempts} > 9 )); then
kill "${PROXY_PID}"
kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
fi
sleep .5
kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
attempts=$((attempts+1))
done
kube::log::status "kubectl proxy running on port ${PROXY_PORT}"
# We try checking kubectl proxy 30 times with 1s delays to avoid occasional
# failures.
if [ $# -eq 0 ]; then
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/healthz" "kubectl proxy"
else
kube::util::wait_for_url "http://127.0.0.1:${PROXY_PORT}/$1/healthz" "kubectl proxy --api-prefix=$1"
fi
}
# Tears down everything the test run started: kills the apiserver,
# controller-manager and kubelet (when their PIDs were recorded), stops the
# kubectl proxy, cleans up etcd, and removes the temp directory.
function cleanup()
{
# The ${VAR-} defaults let cleanup run safely from any point in the test
# lifecycle, even before the processes were launched.
[[ -n "${APISERVER_PID-}" ]] && kill "${APISERVER_PID}" 1>&2 2>/dev/null
[[ -n "${CTLRMGR_PID-}" ]] && kill "${CTLRMGR_PID}" 1>&2 2>/dev/null
[[ -n "${KUBELET_PID-}" ]] && kill "${KUBELET_PID}" 1>&2 2>/dev/null
stop-proxy
kube::etcd::cleanup
# NOTE(review): relies on KUBE_TEMP being set (nounset is on in this file);
# presumably kube::util::ensure-temp-dir set it in setup() — confirm.
rm -rf "${KUBE_TEMP}"
local junit_dir="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
echo "junit report dir:" ${junit_dir}
kube::log::status "Clean up complete"
}
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# return code. Prints a helpful message on failure.
# Executes curl against the proxy. $1 is the path to use, $2 is the desired
# HTTP return code. Prints a helpful message and returns 1 on mismatch.
function check-curl-proxy-code()
{
local -r address=$1
local -r desired=$2
local -r full_address="${PROXY_HOST}:${PROXY_PORT}${address}"
local status
# -w "%{http_code}" prints only the status code; the body is discarded.
status=$(curl -w "%{http_code}" --silent --output /dev/null "${full_address}")
if [ "${status}" != "${desired}" ]; then
echo "For address ${full_address}, got ${status} but wanted ${desired}"
return 1
fi
return 0
}
# TODO: Remove this function when we do the retry inside the kubectl commands. See #15333.
# Runs "kubectl $@", retrying up to 4 attempts with exponential backoff when
# stderr contains the optimistic-concurrency message "the object has been
# modified". Other failures are swallowed (|| true) and end the loop.
# Set PRESERVE_ERR_FILE=true to keep the stderr capture file for the caller.
function kubectl-with-retry()
{
ERROR_FILE="${KUBE_TEMP}/kubectl-error"
preserve_err_file=${PRESERVE_ERR_FILE-false}
for count in {0..3}; do
# || true keeps errexit from aborting; the captured error text decides
# whether to retry.
kubectl "$@" 2> ${ERROR_FILE} || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
if [ "$preserve_err_file" != true ] ; then
rm "${ERROR_FILE}"
fi
break
fi
done
}
# Waits for the pods with the given label to match the list of names. Don't call
# this function unless you know the exact pod names, or expect no pods.
# $1: label to match
# $2: list of pod names sorted by name
# Example invocation:
# wait-for-pods-with-label "app=foo" "nginx-0nginx-1"
function wait-for-pods-with-label()
{
local i
# Poll up to 10 times with a linearly growing sleep (1s, 2s, ..., 10s).
for i in $(seq 1 10); do
kubeout=`kubectl get po -l $1 --template '{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}"`
# The template concatenates pod names with no separator, so $2 must be
# the concatenation of the sorted names (see the example above).
if [[ $kubeout = $2 ]]; then
return
fi
echo Waiting for pods: $2, found $kubeout
sleep $i
done
kube::log::error_exit "Timeout waiting for pods with label $1"
}
# Code to be run before running the tests.
# Registers cleanup, starts etcd, checks for GNU sed, builds kubectl, and
# points kubectl at a throwaway "test" context.
setup() {
kube::util::trap_add cleanup EXIT SIGINT
kube::util::ensure-temp-dir
# ensure ~/.kube/config isn't loaded by tests
HOME="${KUBE_TEMP}"
kube::etcd::start
# Find a standard sed instance for use with edit scripts
SED=sed
if which gsed &>/dev/null; then
SED=gsed
fi
# The edit tests below rely on GNU sed extensions; bail out early otherwise.
if ! ($SED --version 2>&1 | grep -q GNU); then
echo "!!! GNU sed is required. If on OS X, use 'brew install gnu-sed'."
exit 1
fi
kube::log::status "Building kubectl"
make -C "${KUBE_ROOT}" WHAT="cmd/kubectl"
# Check kubectl
kube::log::status "Running kubectl with no options"
"${KUBE_OUTPUT_HOSTBIN}/kubectl"
# TODO: we need to note down the current default namespace and set back to this
# namespace after the tests are done.
kubectl config view
CONTEXT="test"
kubectl config set-context "${CONTEXT}"
kubectl config use-context "${CONTEXT}"
kube::log::status "Setup complete"
}
########################################################
# Kubectl version (--short, --client, --output) #
########################################################
# Verifies that every output mode of "kubectl version" (--client, --short,
# -o json, -o yaml) reports client/server info consistent with the default
# output, using diff_assert ("eq" = must match, "ne" = must differ).
run_kubectl_version_tests() {
# Each run_* test toggles strict mode on entry and off on exit so the
# driver loop keeps running after a test completes.
set -o nounset
set -o errexit
kube::log::status "Testing kubectl version"
TEMP="${KUBE_TEMP}"
kubectl get "${kube_flags[@]}" --raw /version
# create version files, one for the client, one for the server.
# these are the files we will use to ensure that the remainder output is correct
kube::test::version::object_to_file "Client" "" "${TEMP}/client_version_test"
kube::test::version::object_to_file "Server" "" "${TEMP}/server_version_test"
kube::log::status "Testing kubectl version: check client only output matches expected output"
kube::test::version::object_to_file "Client" "--client" "${TEMP}/client_only_version_test"
kube::test::version::object_to_file "Client" "--client" "${TEMP}/server_client_only_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_version_test" "the flag '--client' shows correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_version_test" "the flag '--client' correctly has no server version info"
kube::log::status "Testing kubectl version: verify json output"
kube::test::version::json_client_server_object_to_file "" "clientVersion" "${TEMP}/client_json_version_test"
kube::test::version::json_client_server_object_to_file "" "serverVersion" "${TEMP}/server_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_json_version_test" "--output json has correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_json_version_test" "--output json has correct server info"
kube::log::status "Testing kubectl version: verify json output using additional --client flag does not contain serverVersion"
kube::test::version::json_client_server_object_to_file "--client" "clientVersion" "${TEMP}/client_only_json_version_test"
kube::test::version::json_client_server_object_to_file "--client" "serverVersion" "${TEMP}/server_client_only_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_only_json_version_test" "--client --output json has correct client info"
kube::test::version::diff_assert "${TEMP}/server_version_test" "ne" "${TEMP}/server_client_only_json_version_test" "--client --output json has no server info"
kube::log::status "Testing kubectl version: compare json output using additional --short flag"
kube::test::version::json_client_server_object_to_file "--short" "clientVersion" "${TEMP}/client_short_json_version_test"
kube::test::version::json_client_server_object_to_file "--short" "serverVersion" "${TEMP}/server_short_json_version_test"
kube::test::version::diff_assert "${TEMP}/client_version_test" "eq" "${TEMP}/client_short_json_version_test" "--short --output client json info is equal to non short result"
kube::test::version::diff_assert "${TEMP}/server_version_test" "eq" "${TEMP}/server_short_json_version_test" "--short --output server json info is equal to non short result"
kube::log::status "Testing kubectl version: compare json output with yaml output"
kube::test::version::json_object_to_file "" "${TEMP}/client_server_json_version_test"
kube::test::version::yaml_object_to_file "" "${TEMP}/client_server_yaml_version_test"
kube::test::version::diff_assert "${TEMP}/client_server_json_version_test" "eq" "${TEMP}/client_server_yaml_version_test" "--output json/yaml has identical information"
set +o nounset
set +o errexit
}
# Runs all pod related tests.
run_pod_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:pods)"
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kubectl get "${kube_flags[@]}" pods -o json
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod'
kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod'
# Repeat above test using jsonpath template
kube::test::get_object_jsonpath_assert pods "{.items[*]$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pod/valid-pod' "{$id_field}" 'valid-pod'
kube::test::get_object_jsonpath_assert 'pods/valid-pod' "{$id_field}" 'valid-pod'
# Describe command should print detailed information
kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image:" "Node:" "Labels:" "Status:"
# Describe command should print events information by default
kube::test::describe_object_events_assert pods 'valid-pod'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert pods 'valid-pod' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert pods 'valid-pod' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert pods
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert pods false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert pods true
### Validate Export ###
kube::test::get_object_assert 'pods/valid-pod' "{{.metadata.namespace}} {{.metadata.name}}" '<no value> valid-pod' "--export=true"
### Dump current valid-pod POD
output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1 "${kube_flags[@]}")
### Delete POD valid-pod by id
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --now
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pod valid-pod "${kube_flags[@]}" --now
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete POD valid-pod by id with --grace-period=0
# Pre-condition: valid-pod POD exists
kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command succeeds without --force by waiting
kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from dumped YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
echo "${output_pod}" | $SED '/namespace:/d' | kubectl create -f - "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod from JSON
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from JSON
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete POD valid-pod with label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
### Create POD valid-pod from YAML
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete PODs with no parameter mustn't kill everything
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete pods "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete PODs with --all and a label selector is not permitted
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Delete all PODs
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 --force # --all remove all the pods
# Post-condition: no POD exists
kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
# Detailed tests for describe pod output
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-kubectl-describe-pod
# Post-condition: namespace 'test-secrets' is created.
kube::test::get_object_assert 'namespaces/test-kubectl-describe-pod' "{{$id_field}}" 'test-kubectl-describe-pod'
### Create a generic secret
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-kubectl-describe-pod' "{{$secret_type}}" 'test-type'
### Create a generic configmap
# Pre-condition: no CONFIGMAP exists
kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key-2=value2 --namespace=test-kubectl-describe-pod
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap'
### Create a pod disruption budget with minAvailable
# Command
kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-1 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '2'
# Command
kubectl create pdb test-pdb-2 --selector=app=rails --min-available=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-2 --namespace=test-kubectl-describe-pod' "{{$pdb_min_available}}" '50%'
### Create a pod disruption budget with maxUnavailable
# Command
kubectl create pdb test-pdb-3 --selector=app=rails --max-unavailable=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-3 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '2'
# Command
kubectl create pdb test-pdb-4 --selector=app=rails --max-unavailable=50% --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'
### Fail creating a pod disruption budget if both maxUnavailable and minAvailable specified
! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod
# Create a pod that consumes secret, configmap, and downward API keys as envs
kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod-with-api-env.yaml --namespace=test-kubectl-describe-pod
kube::test::describe_object_assert 'pods --namespace=test-kubectl-describe-pod' 'env-test-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Describe command (resource only) should print detailed information about environment variables
kube::test::describe_resource_assert 'pods --namespace=test-kubectl-describe-pod' "TEST_CMD_1" "<set to the key 'key-1' in secret 'test-secret'>" "TEST_CMD_2" "<set to the key 'key-2' of config map 'test-configmap'>" "TEST_CMD_3" "env-test-pod (v1:metadata.name)"
# Clean-up
kubectl delete pod env-test-pod --namespace=test-kubectl-describe-pod
kubectl delete secret test-secret --namespace=test-kubectl-describe-pod
kubectl delete configmap test-configmap --namespace=test-kubectl-describe-pod
kubectl delete pdb/test-pdb-1 pdb/test-pdb-2 pdb/test-pdb-3 pdb/test-pdb-4 --namespace=test-kubectl-describe-pod
kubectl delete namespace test-kubectl-describe-pod
### Create two PODs
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl create -f examples/storage/redis/redis-master.yaml "${kube_flags[@]}"
# Post-condition: valid-pod and redis-master PODs are created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
### Delete multiple PODs at once
# Pre-condition: valid-pod and redis-master PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
# Command
kubectl delete pods valid-pod redis-master "${kube_flags[@]}" --grace-period=0 --force # delete multiple pods at once
# Post-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create valid-pod POD
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
### Label the valid-pod POD
# Pre-condition: valid-pod is not labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:'
# Command
kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}"
# Post-condition: valid-pod is labelled
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
### Label the valid-pod POD with empty label value
# Pre-condition: valid-pod does not have label "emptylabel"
kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:'
# Command
kubectl label pods valid-pod emptylabel="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptylabel" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" ''
### Annotate the valid-pod POD with empty annotation value
# Pre-condition: valid-pod does not have annotation "emptyannotation"
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" '<no value>'
# Command
kubectl annotate pods valid-pod emptyannotation="" "${kube_flags[@]}"
# Post-condition: valid pod contains "emptyannotation" with no value
kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field}.emptyannotation}}" ''
### Record label change
# Pre-condition: valid-pod does not have record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range.items}}{{$annotations_field}}:{{end}}" ''
# Command
kubectl label pods valid-pod record-change=true --record=true "${kube_flags[@]}"
# Post-condition: valid-pod has record annotation
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Do not record label change
# Command
kubectl label pods valid-pod no-record-change=true --record=false "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation still contains command with --record=true
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*--record=true.*"
### Record label change with unspecified flag and previous change already recorded
# Command
kubectl label pods valid-pod new-record-change=true "${kube_flags[@]}"
# Post-condition: valid-pod's record annotation contains new change
kube::test::get_object_assert 'pod valid-pod' "{{range$annotations_field}}{{.}}:{{end}}" ".*new-record-change=true.*"
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create pod-with-precision POD
# Pre-condition: no POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/pod-with-precision.json "${kube_flags[@]}"
# Post-condition: valid-pod POD is running
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'pod-with-precision:'
## Patch preserves precision
# Command
kubectl patch "${kube_flags[@]}" pod pod-with-precision -p='{"metadata":{"annotations":{"patchkey": "patchvalue"}}}'
# Post-condition: pod-with-precision POD has patched annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.patchkey}}" 'patchvalue'
# Command
kubectl label pods pod-with-precision labelkey=labelvalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has label
kube::test::get_object_assert 'pod pod-with-precision' "{{${labels_field}.labelkey}}" 'labelvalue'
# Command
kubectl annotate pods pod-with-precision annotatekey=annotatevalue "${kube_flags[@]}"
# Post-condition: pod-with-precision POD has annotation
kube::test::get_object_assert 'pod pod-with-precision' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Cleanup
kubectl delete pod pod-with-precision "${kube_flags[@]}"
### Annotate POD YAML file locally without affecting the live pod.
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Command
kubectl annotate -f hack/testdata/pod.yaml annotatekey=annotatevalue "${kube_flags[@]}"
# Pre-condition: annotationkey is annotationvalue
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
# Command
output_message=$(kubectl annotate --local -f hack/testdata/pod.yaml annotatekey=localvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: annotationkey is still annotationvalue in the live pod, but command output is the new value
kube::test::get_object_assert 'pod test-pod' "{{${annotations_field}.annotatekey}}" 'annotatevalue'
kube::test::if_has_string "${output_message}" "localvalue"
# Cleanup
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
### Create valid-pod POD
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl create --edit can update the label field of multiple resources. tmp-editor.sh is a fake editor
# Build a throwaway executable "editor" that rewrites "mock" to "modified"
# in the file kubectl hands it; it drives the `kubectl create --edit` tests
# below. Quote "${TEMP}" (SC2086) and use printf instead of the
# non-portable `echo -e`.
TEMP=$(mktemp /tmp/tmp-editor-XXXXXXXX.sh)
printf '#!/bin/bash\n%s -i "s/mock/modified/g" $1\n' "$SED" > "${TEMP}"
chmod +x "${TEMP}"
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-json.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
# Pre-condition: no services and no rcs exist
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
EDITOR=${TEMP} kubectl create --edit -f hack/testdata/multi-resource-list.json "${kube_flags[@]}"
# Post-condition: service named modified and rc named modified are created
kube::test::get_object_assert service "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'modified:'
# Clean up
rm ${TEMP}
kubectl delete service/modified "${kube_flags[@]}"
kubectl delete rc/modified "${kube_flags[@]}"
## kubectl create --edit won't create anything if user makes no changes
[ "$(EDITOR=cat kubectl create --edit -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -o json 2>&1 | grep 'Edit cancelled')" ]
## Create valid-pod POD
# Pre-condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
## Patch can modify a local object
kubectl patch --local -f pkg/kubectl/validation/testdata/v1/validPod.yaml --patch='{"spec": {"restartPolicy":"Never"}}' -o jsonpath='{.spec.restartPolicy}' | grep -q "Never"
## Patch pod can change image
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod --record -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]}}'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# Post-condition: valid-pod has the record annotation
kube::test::get_object_assert pods "{{range.items}}{{$annotations_field}}:{{end}}" "${change_cause_annotation}"
# prove that patch can use different types
# --type="json" uses JSON-Patch style [{"op": ...}] operations instead of the
# default strategic-merge payload.
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx2"}]'
# Post-condition: valid-pod POD has image nginx2
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx2:'
# prove that patch can use different types
kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]'
# Post-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
# prove that yaml input works too
YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n'
kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}"
# Post-condition: valid-pod POD has image changed-with-yaml
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'changed-with-yaml:'
## Patch pod from JSON can change image
# Command
kubectl patch "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "gcr.io/google_containers/pause-amd64:3.0"}]}}'
# Post-condition: valid-pod POD has image gcr.io/google_containers/pause-amd64:3.0
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/pause-amd64:3.0:'
## If resourceVersion is specified in the patch, it is treated as a precondition, i.e., if the resourceVersion differs from the one stored in the server, the patch should be rejected
ERROR_FILE="${KUBE_TEMP}/conflict-error"
## If the resourceVersion is the same as the one stored in the server, the patch will be applied.
# Command
# Needs to retry because other party may change the resource.
# Retry the patch with exponential backoff: another writer may bump the
# pod's resourceVersion between our read and our patch, which surfaces as a
# "the object has been modified" conflict from the server.
for count in {0..3}; do
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
if grep -q "the object has been modified" "${ERROR_FILE}"; then
# Fix: log the retry counter ($count); the old code logged "$1", i.e. the
# enclosing function's first argument, not the retry number.
kube::log::status "retry $count, error: $(cat ${ERROR_FILE})"
rm "${ERROR_FILE}"
sleep $((2**count))
else
rm "${ERROR_FILE}"
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
break
fi
done
## If the resourceVersion is different from the one stored in the server, the patch will be rejected.
resourceVersion=$(kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{ .metadata.resourceVersion }}')
((resourceVersion+=100))
# Command
kubectl patch "${kube_flags[@]}" pod valid-pod -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "nginx"}]},"metadata":{"resourceVersion":"'$resourceVersion'"}}' 2> "${ERROR_FILE}" || true
# Post-condition: should get an error reporting the conflict
if grep -q "please apply your changes to the latest version and try again" "${ERROR_FILE}"; then
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns error as expected: $(cat ${ERROR_FILE})"
else
kube::log::status "\"kubectl patch with resourceVersion $resourceVersion\" returns unexpected error or non-error: $(cat ${ERROR_FILE})"
exit 1
fi
rm "${ERROR_FILE}"
## --force replace pod can change other field, e.g., spec.container.name
# Command
kubectl get "${kube_flags[@]}" pod valid-pod -o json | $SED 's/"kubernetes-serve-hostname"/"replaced-k8s-serve-hostname"/g' > /tmp/tmp-valid-pod.json
kubectl replace "${kube_flags[@]}" --force -f /tmp/tmp-valid-pod.json
# Post-condition: spec.container.name = "replaced-k8s-serve-hostname"
kube::test::get_object_assert 'pod valid-pod' "{{(index .spec.containers 0).name}}" 'replaced-k8s-serve-hostname'
## check replace --grace-period requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --grace-period=1 -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-grace-period must have \-\-force specified'
## check replace --timeout requires --force
output_message=$(! kubectl replace "${kube_flags[@]}" --timeout=1s -f /tmp/tmp-valid-pod.json 2>&1)
kube::test::if_has_string "${output_message}" '\-\-timeout must have \-\-force specified'
#cleaning
rm /tmp/tmp-valid-pod.json
## replace of a cluster scoped resource can succeed
# Pre-condition: a node exists
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test"
}
}
__EOF__
kubectl replace -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Node",
"apiVersion": "v1",
"metadata": {
"name": "node-v1-test",
"annotations": {"a":"b"},
"resourceVersion": "0"
}
}
__EOF__
# Post-condition: the node command succeeds
kube::test::get_object_assert "node node-v1-test" "{{.metadata.annotations.a}}" 'b'
kubectl delete node node-v1-test "${kube_flags[@]}"
## kubectl edit can update the image field of a POD. tmp-editor.sh is a fake editor
echo -e "#!/bin/bash\n$SED -i \"s/nginx/gcr.io\/google_containers\/serve_hostname/g\" \$1" > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
# Pre-condition: valid-pod POD has image nginx
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:'
[[ "$(EDITOR=/tmp/tmp-editor.sh kubectl edit "${kube_flags[@]}" pods/valid-pod --output-patch=true | grep Patch:)" ]]
# Post-condition: valid-pod POD has image gcr.io/google_containers/serve_hostname
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'gcr.io/google_containers/serve_hostname:'
# cleaning
rm /tmp/tmp-editor.sh
## kubectl edit should work on Windows
[ "$(EDITOR=cat kubectl edit pod/valid-pod 2>&1 | grep 'Edit cancelled')" ]
[ "$(EDITOR=cat kubectl edit pod/valid-pod | grep 'name: valid-pod')" ]
[ "$(EDITOR=cat kubectl edit --windows-line-endings pod/valid-pod | file - | grep CRLF)" ]
[ ! "$(EDITOR=cat kubectl edit --windows-line-endings=false pod/valid-pod | file - | grep CRLF)" ]
[ "$(EDITOR=cat kubectl edit ns | grep 'kind: List')" ]
### Label POD YAML file locally without affecting the live pod.
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
output_message=$(kubectl label --local --overwrite -f hack/testdata/pod.yaml name=localonlyvalue -o yaml "${kube_flags[@]}")
echo $output_message
# Post-condition: name is still valid-pod in the live pod, but command output is the new value
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
kube::test::if_has_string "${output_message}" "localonlyvalue"
### Overwriting an existing label is not permitted
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is still valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
### --overwrite must be used to overwrite existing label, can be applied to all resources
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
kubectl label --overwrite pods --all name=valid-pod-super-sayan "${kube_flags[@]}"
# Post-condition: name is valid-pod-super-sayan
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod-super-sayan'
### Delete POD by label
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two PODs from 1 yaml file
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: redis-master and valid-pod PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
### Delete two PODs from 1 yaml file
# Pre-condition: redis-master and valid-pod PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-master:valid-pod:'
# Command
kubectl delete -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
# Post-condition: no PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
## kubectl apply should update configuration annotations only if apply is already called
## 1. kubectl create doesn't set the annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 2. kubectl replace doesn't set the annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 3. kubectl apply does set the annotation
# Command: apply the pod "test-pod"
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is applied
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-applied'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration
## 4. kubectl replace updates an existing annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | $SED 's/test-pod-applied/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
kubectl replace -f "${KUBE_TEMP}"/test-pod-replace.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]]
# Clean up
rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
kubectl delete pods test-pod "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Runs tests related to kubectl apply.
run_kubectl_apply_tests() {
set -o nounset
set -o errexit
# Reads harness globals: kube_flags, id_field, labels_field.
create_and_use_new_namespace
kube::log::status "Testing kubectl apply"
## kubectl apply should create the resource that doesn't exist yet
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a pod "test-pod" (doesn't exist) should create this pod
kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete pods test-pod "${kube_flags[@]}"
## kubectl apply should be able to clear defaulted fields.
# Pre-Condition: no deployment exists
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment
kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]}"
# Post-Condition: deployment "test-deployment-retainkeys" created
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys'
# Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
# Command: apply a deployment "test-deployment-retainkeys" should clear
# defaulted fields and successfully update the deployment
[[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]]
# Post-Condition: deployment "test-deployment-retainkeys" has updated fields
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
[[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]]
! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
# Clean up
kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]}"
## kubectl apply -f with label selector should only apply matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply
kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
## kubectl apply --prune
# Pruning deletes previously-applied objects (matching the selector) that are
# missing from the currently applied manifest set, as asserted below.
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "a" not found'
# cleanup
kubectl delete pods b
# same thing without prune for a sanity check
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply a
kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "b" not found'
# apply b
kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
# check both pods exist
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
# check wrong pod doesn't exist
# cleanup
kubectl delete pod/a pod/b
## kubectl apply --prune requires a --all flag to select everything
output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" \
'all resources selected for prune without explicitly passing --all'
# should apply everything
kubectl apply --all --prune -f hack/testdata/prune
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
kubectl delete pod/a pod/b
## kubectl apply --prune should fallback to delete for non reapable types
kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"
## kubectl apply --prune --prune-whitelist
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and don't prune pod a by overwriting whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
# apply svc and prune pod a with default whitelist
kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# cleanup
kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"
# restore the caller's shell options (nounset/errexit are scoped to this test)
set +o nounset
set +o errexit
}
# Runs tests related to kubectl create --filename(-f) --selector(-l).
run_kubectl_create_filter_tests() {
set -o nounset
set -o errexit
# Reads harness globals: kube_flags, id_field, labels_field.
create_and_use_new_namespace
kube::log::status "Testing kubectl create filter"
## kubectl create -f with label selector should only create matching objects
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# create
kubectl create -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
# check right pod exists
kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
# check wrong pod doesn't exist
output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
# cleanup
kubectl delete pods selector-test-pod
# restore the caller's shell options
set +o nounset
set +o errexit
}
# Verifies that `kubectl apply` propagates user-defined null values: labels
# nulled in the second manifest must be removed from the live deployment.
run_kubectl_apply_deployments_tests() {
set -o nounset
set -o errexit
# Reads harness globals: kube_flags, id_field.
create_and_use_new_namespace
kube::log::status "Testing kubectl apply deployments"
## kubectl apply should propagate user defined null values
# Pre-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply base deployment
kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
# check right deployment exists
kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
# check the right labels exist
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
# apply new deployment with new template labels
kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
# after the second apply the l1 labels must be gone ("<no value>") and the
# l2 labels must be present
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l2}}" 'l2'
kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l2}}" 'l2'
# cleanup
# need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
# Post-Condition: no Deployments, ReplicaSets, Pods exist
kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# restore the caller's shell options
set +o nounset
set +o errexit
}
# Runs tests for --save-config tests.
run_save_config_tests() {
set -o nounset
set -o errexit
# Reads harness globals: kube_flags, id_field, KUBE_TEMP, SED, IMAGE_NGINX.
kube::log::status "Testing kubectl --save-config"
## Configuration annotations should be set when --save-config is enabled
## 1. kubectl create --save-config should generate configuration annotation
# Pre-Condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create a pod "test-pod"
kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 2. kubectl edit --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: edit the pod "test-pod"
# The fake EDITOR script rewrites the pod's label in place, simulating an
# interactive edit.
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/bin/bash\n$SED -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
chmod +x "${temp_editor}"
EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 3. kubectl replace --save-config should generate configuration annotation
# Pre-Condition: no POD exists, then create pod "test-pod", which shouldn't have configuration annotation
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: replace the pod "test-pod"
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
[[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
## 4. kubectl run --save-config should generate configuration annotation
# Pre-Condition: no RC exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: create the rc "nginx" with image nginx
kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
# Post-Condition: rc "nginx" has configuration annotation
[[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
## 5. kubectl expose --save-config should generate configuration annotation
# Pre-Condition: no service exists
kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command: expose the rc "nginx"
kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
# Post-Condition: service "nginx" has configuration annotation
[[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Clean up
kubectl delete rc,svc nginx
## 6. kubectl autoscale --save-config should generate configuration annotation
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Command: autoscale rc "frontend"
kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation
[[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
# Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
output_message=$(kubectl get hpa.autoscaling -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# tests kubectl group prefix matching
output_message=$(kubectl get hpa.autoscal -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'autoscaling/v1'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the rc reaper.
kubectl delete hpa frontend "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
# restore the caller's shell options
set +o nounset
set +o errexit
}
# Exercises 'kubectl run' against each supported generator:
#   - job/v1 (batch Job)
#   - the default generator (extensions/v1beta1 Deployment)
#   - deployment/apps.v1beta1 (apps Deployment)
#   - cronjob/v1beta1 (CronJob)
# Requires the harness to have set $IMAGE_PERL, $IMAGE_NGINX, $id_field and
# kube_flags, and relies on kube::test:: assertion helpers.
run_kubectl_run_tests() {
  set -o nounset
  set -o errexit
  create_and_use_new_namespace
  kube::log::status "Testing kubectl run"
  ## kubectl run should create deployments, jobs or cronjob
  # Pre-Condition: no Job exists
  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: run a perl one-shot computation as a Job named "pi"
  kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
  # Post-Condition: Job "pi" is created
  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By"
  # Clean up
  kubectl delete jobs pi "${kube_flags[@]}"
  # Post-condition: no pods exist.
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Pre-Condition: no Deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: default generator produces an extensions/v1beta1 Deployment
  kubectl run nginx-extensions "--image=$IMAGE_NGINX" "${kube_flags[@]}"
  # Post-Condition: Deployment "nginx" is created
  kube::test::get_object_assert deployment.extensions "{{range.items}}{{$id_field}}:{{end}}" 'nginx-extensions:'
  # and old generator was used, iow. old defaults are applied
  # (the old generator does not set spec.revisionHistoryLimit, so '2' must be absent)
  output_message=$(kubectl get deployment.extensions/nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_not_string "${output_message}" '2'
  # Clean up
  kubectl delete deployment nginx-extensions "${kube_flags[@]}"
  # Command: explicitly request the apps.v1beta1 deployment generator
  kubectl run nginx-apps "--image=$IMAGE_NGINX" --generator=deployment/apps.v1beta1 "${kube_flags[@]}"
  # Post-Condition: Deployment "nginx" is created
  kube::test::get_object_assert deployment.apps "{{range.items}}{{$id_field}}:{{end}}" 'nginx-apps:'
  # and new generator was used, iow. new defaults are applied
  # (the apps generator defaults spec.revisionHistoryLimit to 2)
  output_message=$(kubectl get deployment/nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_string "${output_message}" '2'
  # Clean up
  kubectl delete deployment nginx-apps "${kube_flags[@]}"
  # Pre-Condition: no Job exists
  kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: a --schedule flag plus the cronjob generator produces a CronJob
  kubectl run pi --schedule="*/5 * * * *" --generator=cronjob/v1beta1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
  # Post-Condition: CronJob "pi" is created
  kube::test::get_object_assert cronjobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
  # Clean up
  kubectl delete cronjobs pi "${kube_flags[@]}"
  set +o nounset
  set +o errexit
}
# Exercises 'kubectl get' output behavior:
#   - error text for non-existent resources (plain, -o name, -o json)
#   - "No resources found" suppression for machine-readable formats
#   - 'kubectl get all' hitting every core/extension list endpoint
#   - --allow-missing-template-keys for jsonpath and go-template output
#   - watch mode (-w) header/row/yaml output and --request-timeout
#   - printing every item from a multi-document file with -f
# Relies on $id_field, kube_flags and the kube::test:: assertion helpers.
run_kubectl_get_tests() {
  set -o nounset
  set -o errexit
  create_and_use_new_namespace
  kube::log::status "Testing kubectl get"
  ### Test retrieval of non-existing pods
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command (leading '!' inverts the status: the command is expected to fail)
  output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}")
  # Post-condition: POD abc should error since it doesn't exist
  kube::test::if_has_string "${output_message}" 'pods "abc" not found'
  ### Test retrieval of non-existing POD with output flag specified
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o name)
  # Post-condition: POD abc should error since it doesn't exist
  kube::test::if_has_string "${output_message}" 'pods "abc" not found'
  ### Test retrieval of pods when none exist with non-human readable output format flag specified
  # Pre-condition: no pods exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o json)
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string "${output_message}" 'No resources found'
  # Command
  output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o yaml)
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string "${output_message}" 'No resources found'
  # Command
  output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o name)
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string "${output_message}" 'No resources found'
  # Command
  output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o jsonpath='{.items}')
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string "${output_message}" 'No resources found'
  # Command
  output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o go-template='{{.items}}')
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string "${output_message}" 'No resources found'
  # Command
  output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o custom-columns=NAME:.metadata.name)
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string "${output_message}" 'No resources found'
  ### Test retrieval of pods when none exist, with human-readable output format flag specified
  # Pre-condition: no pods exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "No resources found" should be part of the output
  kube::test::if_has_string "${output_message}" 'No resources found'
  # Command
  output_message=$(kubectl get pods --ignore-not-found 2>&1 "${kube_flags[@]}")
  # Post-condition: The text "No resources found" should not be part of the output
  kube::test::if_has_not_string "${output_message}" 'No resources found'
  # Command
  output_message=$(kubectl get pods 2>&1 "${kube_flags[@]}" -o wide)
  # Post-condition: The text "No resources found" should be part of the output
  kube::test::if_has_string "${output_message}" 'No resources found'
  ### Test retrieval of non-existing POD with json output flag specified
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  output_message=$(! kubectl get pods abc 2>&1 "${kube_flags[@]}" -o json)
  # Post-condition: POD abc should error since it doesn't exist
  kube::test::if_has_string "${output_message}" 'pods "abc" not found'
  # Post-condition: make sure we don't display an empty List
  if kube::test::if_has_string "${output_message}" 'List'; then
    echo 'Unexpected List output'
    echo "${LINENO} $(basename $0)"
    exit 1
  fi
  ### Test kubectl get all
  # --v=6 makes kubectl log every HTTP request/response status line it issues
  output_message=$(kubectl --v=6 --namespace default get all 2>&1 "${kube_flags[@]}")
  # Post-condition: Check if we get 200 OK from all the url(s)
  kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/pods 200 OK"
  kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/replicationcontrollers 200 OK"
  kube::test::if_has_string "${output_message}" "/api/v1/namespaces/default/services 200 OK"
  kube::test::if_has_string "${output_message}" "/apis/apps/v1beta1/namespaces/default/statefulsets 200 OK"
  kube::test::if_has_string "${output_message}" "/apis/autoscaling/v1/namespaces/default/horizontalpodautoscalers 200"
  kube::test::if_has_string "${output_message}" "/apis/batch/v1/namespaces/default/jobs 200 OK"
  kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/deployments 200 OK"
  kube::test::if_has_string "${output_message}" "/apis/extensions/v1beta1/namespaces/default/replicasets 200 OK"
  ### Test --allow-missing-template-keys
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
  # Post-condition: valid-pod POD is created
  kubectl get "${kube_flags[@]}" pods -o json
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  ## check --allow-missing-template-keys defaults to true for jsonpath templates
  kubectl get "${kube_flags[@]}" pod valid-pod -o jsonpath='{.missing}'
  ## check --allow-missing-template-keys defaults to true for go templates
  kubectl get "${kube_flags[@]}" pod valid-pod -o go-template='{{.missing}}'
  ## check --allow-missing-template-keys=false results in an error for a missing key with jsonpath
  output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o jsonpath='{.missing}' "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'missing is not found'
  ## check --allow-missing-template-keys=false results in an error for a missing key with go
  output_message=$(! kubectl get pod valid-pod --allow-missing-template-keys=false -o go-template='{{.missing}}' "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'map has no entry for key "missing"'
  ### Test kubectl get watch
  # --request-timeout=1 makes each watch terminate after ~1s instead of hanging
  output_message=$(kubectl get pods -w --request-timeout=1 "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" 'STATUS' # headers
  kube::test::if_has_string "${output_message}" 'valid-pod' # pod details
  output_message=$(kubectl get pods/valid-pod -o name -w --request-timeout=1 "${kube_flags[@]}")
  kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
  kube::test::if_has_string "${output_message}" 'pods/valid-pod' # resource name
  output_message=$(kubectl get pods/valid-pod -o yaml -w --request-timeout=1 "${kube_flags[@]}")
  kube::test::if_has_not_string "${output_message}" 'STATUS' # no headers
  kube::test::if_has_string "${output_message}" 'name: valid-pod' # yaml
  output_message=$(! kubectl get pods/invalid-pod -w --request-timeout=1 "${kube_flags[@]}" 2>&1)
  kube::test::if_has_string "${output_message}" '"invalid-pod" not found'
  # cleanup
  kubectl delete pods valid-pod "${kube_flags[@]}"
  ### Test 'kubectl get -f <file> -o <non default printer>' prints all the items in the file's list
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml "${kube_flags[@]}"
  # Post-condition: PODs redis-master and valid-pod exist
  # Check that all items in the list are printed
  output_message=$(kubectl get -f test/fixtures/doc-yaml/user-guide/multi-pod.yaml -o jsonpath="{..metadata.name}" "${kube_flags[@]}")
  kube::test::if_has_string "${output_message}" "redis-master valid-pod"
  # cleanup
  kubectl delete pods redis-master valid-pod "${kube_flags[@]}"
  set +o nounset
  set +o errexit
}
# Exercises the global --request-timeout flag on 'kubectl get':
# a short timeout on a plain get still succeeds, a watch is cut off with a
# body-read timeout, a unit-less value is accepted, and an invalid unit
# ("1p") is rejected. Relies on $id_field, kube_flags and kube::test:: helpers.
run_kubectl_request_timeout_tests() {
  set -o nounset
  set -o errexit
  kube::log::status "Testing kubectl request timeout"
  ### Test global request timeout option
  # Pre-condition: no POD exists
  create_and_use_new_namespace
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod POD is created
  kubectl get "${kube_flags[@]}" pods -o json
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  ## check --request-timeout on 'get pod'
  output_message=$(kubectl get pod valid-pod --request-timeout=1)
  kube::test::if_has_string "${output_message}" 'valid-pod'
  ## check --request-timeout on 'get pod' with --watch
  # the watch outlives the 1s timeout, so the client reports the body-read timeout
  output_message=$(kubectl get pod valid-pod --request-timeout=1 --watch 2>&1)
  kube::test::if_has_string "${output_message}" 'Timeout exceeded while reading body'
  ## check --request-timeout value with no time unit
  # a bare integer is interpreted as seconds
  output_message=$(kubectl get pod valid-pod --request-timeout=1 2>&1)
  kube::test::if_has_string "${output_message}" 'valid-pod'
  ## check --request-timeout value with invalid time unit
  output_message=$(! kubectl get pod valid-pod --request-timeout="1p" 2>&1)
  kube::test::if_has_string "${output_message}" 'Invalid timeout value'
  # cleanup
  kubectl delete pods valid-pod "${kube_flags[@]}"
  set +o nounset
  set +o errexit
}
# Creates two CustomResourceDefinitions (foos.company.com and
# bars.company.com) from inline JSON heredocs, asserts both exist, then
# delegates the bulk of the CR behavior checks to
# run_non_native_resource_tests before deleting the CRDs.
# Uses kube_flags_with_token because CRD creation needs elevated credentials.
run_crd_tests() {
  set -o nounset
  set -o errexit
  create_and_use_new_namespace
  kube::log::status "Testing kubectl crd"
  kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
  "kind": "CustomResourceDefinition",
  "apiVersion": "apiextensions.k8s.io/v1beta1",
  "metadata": {
    "name": "foos.company.com"
  },
  "spec": {
    "group": "company.com",
    "version": "v1",
    "names": {
      "plural": "foos",
      "kind": "Foo"
    }
  }
}
__EOF__
  # Post-Condition: assertion object exist
  kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'foos.company.com:'
  kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
  "kind": "CustomResourceDefinition",
  "apiVersion": "apiextensions.k8s.io/v1beta1",
  "metadata": {
    "name": "bars.company.com"
  },
  "spec": {
    "group": "company.com",
    "version": "v1",
    "names": {
      "plural": "bars",
      "kind": "Bar"
    }
  }
}
__EOF__
  # Post-Condition: assertion object exist (CRD names sort alphabetically)
  kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{$id_field}}:{{end}}" 'bars.company.com:foos.company.com:'
  # All custom-resource behavior checks live in this helper
  run_non_native_resource_tests
  # teardown
  kubectl delete customresourcedefinitions/foos.company.com "${kube_flags_with_token[@]}"
  kubectl delete customresourcedefinitions/bars.company.com "${kube_flags_with_token[@]}"
  set +o nounset
  set +o errexit
}
# Block until the custom company.com/v1 API group and its foos/bars
# resources are being served by the apiserver.
# Polls all three raw endpoints each round (so every failure is visible in
# the log), sleeping between rounds; returns 0 as soon as all three respond,
# or 1 after exhausting all attempts.
kube::util::non_native_resources() {
  local -r attempts=30
  local -r delay=10
  local attempt ok
  for (( attempt = 1; attempt <= attempts; attempt++ )); do
    ok=true
    kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1' || ok=false
    kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/foos' || ok=false
    kubectl "${kube_flags[@]}" get --raw '/apis/company.com/v1/bars' || ok=false
    if "${ok}"; then
      return 0
    fi
    sleep "${delay}"
  done
  kube::log::error "Timed out waiting for non-native-resources; tried ${attempts} waiting ${delay}s between each"
  return 1
}
# End-to-end behavior checks for custom resources ("TPR" here is legacy
# naming; the objects are served via the CRDs created by run_crd_tests):
# listing (including group/version-qualified forms), every printer format,
# merge patching (remote and --local), labeling, annotating, describing,
# cascading and non-cascading delete, watch delivery, apply semantics on
# single items and lists (add/update/delete of fields and subfields),
# 'apply --prune' with whitelists, and namespace-scoped cleanup.
# Relies on $id_field, kube_flags, KUBE_TEMP and kube::test:: helpers.
run_non_native_resource_tests() {
  set -o nounset
  set -o errexit
  create_and_use_new_namespace
  kube::log::status "Testing kubectl non-native resources"
  kube::util::non_native_resources
  # Test that we can list this new third party resource (foos)
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  # Test that we can list this new third party resource (bars)
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
  # Test that we can create a new resource of type Foo
  kubectl "${kube_flags[@]}" create -f hack/testdata/TPR/foo.yaml "${kube_flags[@]}"
  # Test that we can list this new third party resource
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  # Test alternate forms
  kube::test::get_object_assert foo "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  kube::test::get_object_assert foos.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  kube::test::get_object_assert foos.v1.company.com "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  # Test all printers, with lists and individual items
  kube::log::status "Testing ThirdPartyResource printing"
  kubectl "${kube_flags[@]}" get foos
  kubectl "${kube_flags[@]}" get foos/test
  kubectl "${kube_flags[@]}" get foos -o name
  kubectl "${kube_flags[@]}" get foos/test -o name
  kubectl "${kube_flags[@]}" get foos -o wide
  kubectl "${kube_flags[@]}" get foos/test -o wide
  kubectl "${kube_flags[@]}" get foos -o json
  kubectl "${kube_flags[@]}" get foos/test -o json
  kubectl "${kube_flags[@]}" get foos -o yaml
  kubectl "${kube_flags[@]}" get foos/test -o yaml
  kubectl "${kube_flags[@]}" get foos -o "jsonpath={.items[*].someField}" --allow-missing-template-keys=false
  kubectl "${kube_flags[@]}" get foos/test -o "jsonpath={.someField}" --allow-missing-template-keys=false
  kubectl "${kube_flags[@]}" get foos -o "go-template={{range .items}}{{.someField}}{{end}}" --allow-missing-template-keys=false
  kubectl "${kube_flags[@]}" get foos/test -o "go-template={{.someField}}" --allow-missing-template-keys=false
  output_message=$(kubectl "${kube_flags[@]}" get foos/test -o name)
  kube::test::if_has_string "${output_message}" 'foos/test'
  # Test patching
  kube::log::status "Testing ThirdPartyResource patching"
  kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value1"}' --type=merge
  kube::test::get_object_assert foos/test "{{.patched}}" 'value1'
  kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":"value2"}' --type=merge --record
  kube::test::get_object_assert foos/test "{{.patched}}" 'value2'
  # patching a key to null removes it under merge-patch semantics
  kubectl "${kube_flags[@]}" patch foos/test -p '{"patched":null}' --type=merge --record
  kube::test::get_object_assert foos/test "{{.patched}}" '<no value>'
  # Get local version
  TPR_RESOURCE_FILE="${KUBE_TEMP}/tpr-foos-test.json"
  kubectl "${kube_flags[@]}" get foos/test -o json > "${TPR_RESOURCE_FILE}"
  # cannot apply strategic patch locally (custom resources have no patch schema)
  TPR_PATCH_ERROR_FILE="${KUBE_TEMP}/tpr-foos-test-error"
  ! kubectl "${kube_flags[@]}" patch --local -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${TPR_PATCH_ERROR_FILE}"
  if grep -q "try --type merge" "${TPR_PATCH_ERROR_FILE}"; then
    kube::log::status "\"kubectl patch --local\" returns error as expected for ThirdPartyResource: $(cat ${TPR_PATCH_ERROR_FILE})"
  else
    kube::log::status "\"kubectl patch --local\" returns unexpected error or non-error: $(cat ${TPR_PATCH_ERROR_FILE})"
    exit 1
  fi
  # can apply merge patch locally
  kubectl "${kube_flags[@]}" patch --local -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
  # can apply merge patch remotely
  kubectl "${kube_flags[@]}" patch --record -f "${TPR_RESOURCE_FILE}" -p '{"patched":"value3"}' --type=merge -o json
  kube::test::get_object_assert foos/test "{{.patched}}" 'value3'
  rm "${TPR_RESOURCE_FILE}"
  rm "${TPR_PATCH_ERROR_FILE}"
  # Test labeling
  kube::log::status "Testing ThirdPartyResource labeling"
  kubectl "${kube_flags[@]}" label foos --all listlabel=true
  kubectl "${kube_flags[@]}" label foo/test itemlabel=true
  # Test annotating
  kube::log::status "Testing ThirdPartyResource annotating"
  kubectl "${kube_flags[@]}" annotate foos --all listannotation=true
  kubectl "${kube_flags[@]}" annotate foo/test itemannotation=true
  # Test describing
  kube::log::status "Testing ThirdPartyResource describing"
  kubectl "${kube_flags[@]}" describe foos
  kubectl "${kube_flags[@]}" describe foos/test
  kubectl "${kube_flags[@]}" describe foos | grep listlabel=true
  kubectl "${kube_flags[@]}" describe foos | grep itemlabel=true
  # Delete the resource with cascade.
  kubectl "${kube_flags[@]}" delete foos test --cascade=true
  # Make sure it's gone
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  # Test that we can create a new resource of type Bar
  kubectl "${kube_flags[@]}" create -f hack/testdata/TPR/bar.yaml "${kube_flags[@]}"
  # Test that we can list this new third party resource
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  # Test that we can watch the resource.
  # Start watcher in background with process substitution,
  # so we can read from stdout asynchronously.
  kube::log::status "Testing ThirdPartyResource watching"
  # fd 3 is wired to the watcher's stdout; the first line it emits is the
  # subshell PID (echo $!) so we can kill the watcher later.
  exec 3< <(kubectl "${kube_flags[@]}" get bars --request-timeout=1m --watch-only -o name & echo $! ; wait)
  local watch_pid
  read <&3 watch_pid
  # We can't be sure when the watch gets established,
  # so keep triggering events (in the background) until something comes through.
  local tries=0
  while [ ${tries} -lt 10 ]; do
    tries=$((tries+1))
    kubectl "${kube_flags[@]}" patch bars/test -p "{\"patched\":\"${tries}\"}" --type=merge
    sleep 1
  done &
  local patch_pid=$!
  # Wait up to 30s for a complete line of output.
  local watch_output
  read <&3 -t 30 watch_output
  # Stop the watcher and the patch loop.
  # SIGKILL is deliberate: both are detached background children that we
  # do not need to let clean up, and TERM could race with the 'wait'.
  kill -9 ${watch_pid}
  kill -9 ${patch_pid}
  kube::test::if_has_string "${watch_output}" 'bars/test'
  # Delete the resource without cascade.
  kubectl "${kube_flags[@]}" delete bars test --cascade=false
  # Make sure it's gone
  kube::test::wait_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
  # Test that we can create single item via apply
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo.yaml
  # Test that we have create a foo named test
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  # Test that the field has the expected value
  kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
  # Test that apply an empty patch doesn't change fields
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo.yaml
  # Test that the field has the same value after re-apply
  kube::test::get_object_assert foos/test '{{.someField}}' 'field1'
  # Test that apply has updated the subfield
  kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'subfield1'
  # Update a subfield and then apply the change
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-updated-subfield.yaml
  # Test that apply has updated the subfield
  kube::test::get_object_assert foos/test '{{.nestedField.someSubfield}}' 'modifiedSubfield'
  # Test that the field has the expected value
  kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' 'subfield2'
  # Delete a subfield and then apply the change
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-deleted-subfield.yaml
  # Test that apply has deleted the field
  kube::test::get_object_assert foos/test '{{.nestedField.otherSubfield}}' '<no value>'
  # Test that the field does not exist
  kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' '<no value>'
  # Add a field and then apply the change
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/foo-added-subfield.yaml
  # Test that apply has added the field
  kube::test::get_object_assert foos/test '{{.nestedField.newSubfield}}' 'subfield3'
  # Delete the resource
  kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/foo.yaml
  # Make sure it's gone
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  # Test that we can create list via apply
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list.yaml
  # Test that we have create a foo and a bar from a list
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test-list:'
  # Test that the field has the expected value
  kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
  kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
  # Test that re-apply an list doesn't change anything
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list.yaml
  # Test that the field has the same value after re-apply
  kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
  kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
  # Test that the fields have the expected value
  kube::test::get_object_assert foos/test-list '{{.someField}}' 'field1'
  kube::test::get_object_assert bars/test-list '{{.someField}}' 'field1'
  # Update fields and then apply the change
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-updated-field.yaml
  # Test that apply has updated the fields
  kube::test::get_object_assert foos/test-list '{{.someField}}' 'modifiedField'
  kube::test::get_object_assert bars/test-list '{{.someField}}' 'modifiedField'
  # Test that the field has the expected value
  kube::test::get_object_assert foos/test-list '{{.otherField}}' 'field2'
  kube::test::get_object_assert bars/test-list '{{.otherField}}' 'field2'
  # Delete fields and then apply the change
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-deleted-field.yaml
  # Test that apply has deleted the fields
  kube::test::get_object_assert foos/test-list '{{.otherField}}' '<no value>'
  kube::test::get_object_assert bars/test-list '{{.otherField}}' '<no value>'
  # Test that the fields does not exist
  kube::test::get_object_assert foos/test-list '{{.newField}}' '<no value>'
  kube::test::get_object_assert bars/test-list '{{.newField}}' '<no value>'
  # Add a field and then apply the change
  kubectl "${kube_flags[@]}" apply -f hack/testdata/TPR/multi-tpr-list-added-field.yaml
  # Test that apply has added the field
  kube::test::get_object_assert foos/test-list '{{.newField}}' 'field3'
  kube::test::get_object_assert bars/test-list '{{.newField}}' 'field3'
  # Delete the resource
  kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/multi-tpr-list.yaml
  # Make sure it's gone
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
  ## kubectl apply --prune
  # Test that no foo or bar exist
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply --prune on foo.yaml that has foo/test
  kubectl apply --prune -l pruneGroup=true -f hack/testdata/TPR/foo.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
  # check right tprs exist
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
  # apply --prune on bar.yaml that has bar/test
  # (foo/test carries the pruneGroup label, so it must be pruned away here)
  kubectl apply --prune -l pruneGroup=true -f hack/testdata/TPR/bar.yaml "${kube_flags[@]}" --prune-whitelist=company.com/v1/Foo --prune-whitelist=company.com/v1/Bar
  # check right tprs exist
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" 'test:'
  # Delete the resource
  kubectl "${kube_flags[@]}" delete -f hack/testdata/TPR/bar.yaml
  # Make sure it's gone
  kube::test::get_object_assert foos "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert bars "{{range.items}}{{$id_field}}:{{end}}" ''
  # Test 'kubectl create' with namespace, and namespace cleanup.
  kubectl "${kube_flags[@]}" create namespace non-native-resources
  kubectl "${kube_flags[@]}" create -f hack/testdata/TPR/bar.yaml --namespace=non-native-resources
  kube::test::get_object_assert bars '{{len .items}}' '1' --namespace=non-native-resources
  kubectl "${kube_flags[@]}" delete namespace non-native-resources
  # Make sure objects go away.
  kube::test::wait_object_assert bars '{{len .items}}' '0' --namespace=non-native-resources
  # Make sure namespace goes away.
  local tries=0
  while kubectl "${kube_flags[@]}" get namespace non-native-resources && [ ${tries} -lt 10 ]; do
    tries=$((tries+1))
    sleep ${tries}
  done
  set +o nounset
  set +o errexit
}
run_recursive_resources_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing recursive resources"
### Create multiple busybox PODs recursively from directory of YAML files
# Pre-condition: no POD exists
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
output_message=$(! kubectl create -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are created, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Edit multiple busybox PODs by updating the image field of multiple PODs recursively from a directory. tmp-editor.sh is a fake editor
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
echo -e '#!/bin/bash\nsed -i "s/image: busybox/image: prom\/busybox/g" $1' > /tmp/tmp-editor.sh
chmod +x /tmp/tmp-editor.sh
output_message=$(! EDITOR=/tmp/tmp-editor.sh kubectl edit -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are not edited, and since busybox2 is malformed, it should error
# The reason why busybox0 & busybox1 PODs are not edited is because the editor tries to load all objects in
# a list but since it contains invalid objects, it will never open.
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'busybox:busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# cleaning
rm /tmp/tmp-editor.sh
## Replace multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl replace -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are replaced, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
## Describe multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl describe -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are described, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "app=busybox0"
kube::test::if_has_string "${output_message}" "app=busybox1"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Annotate multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl annotate -f hack/testdata/recursive/pod annotatekey='annotatevalue' --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are annotated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${annotations_field}.annotatekey}}:{{end}}" 'annotatevalue:annotatevalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Apply multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl apply -f hack/testdata/recursive/pod-modify --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are updated, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.status}}:{{end}}" 'replaced:replaced:'
kube::test::if_has_string "${output_message}" 'error validating data: kind not set'
### Convert deployment YAML file locally without affecting the live deployment.
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Command
output_message=$(kubectl convert --local -f hack/testdata/deployment-revision1.yaml --output-version=apps/v1beta1 -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")
echo $output_message
# Post-condition: apiVersion is still extensions/v1beta1 in the live deployment, but command output is the new value
kube::test::get_object_assert 'deployment nginx' "{{ .apiVersion }}" 'extensions/v1beta1'
kube::test::if_has_string "${output_message}" "apps/v1beta1"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
## Convert multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl convert -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are converted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Get multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl get -f hack/testdata/recursive/pod --recursive 2>&1 "${kube_flags[@]}" -o go-template="{{range.items}}{{$id_field}}:{{end}}")
# Post-condition: busybox0 & busybox1 PODs are retrieved, but because busybox2 is malformed, it should not show up
kube::test::if_has_string "${output_message}" "busybox0:busybox1:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Label multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl label -f hack/testdata/recursive/pod mylabel='myvalue' --recursive 2>&1 "${kube_flags[@]}")
echo $output_message
# Post-condition: busybox0 & busybox1 PODs are labeled, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{${labels_field}.mylabel}}:{{end}}" 'myvalue:myvalue:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Patch multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl patch -f hack/testdata/recursive/pod -p='{"spec":{"containers":[{"name":"busybox","image":"prom/busybox"}]}}' --recursive 2>&1 "${kube_flags[@]}")
echo $output_message
# Post-condition: busybox0 & busybox1 PODs are patched, but because busybox2 is malformed, it should not show up
kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'prom/busybox:prom/busybox:'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox PODs recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/pod --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 PODs are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Create replication controller recursively from directory of YAML files
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
### Autoscale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl autoscale --min=1 --max=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox replication controllers are autoscaled
# with min. of 1 replica & max of 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'hpa busybox0' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::get_object_assert 'hpa busybox1' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kubectl delete hpa busybox0 "${kube_flags[@]}"
kubectl delete hpa busybox1 "${kube_flags[@]}"
### Expose multiple replication controllers as service recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl expose -f hack/testdata/recursive/rc --recursive --port=80 2>&1 "${kube_flags[@]}")
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service busybox0' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::get_object_assert 'service busybox1' "{{$port_name}} {{$port_field}}" '<no value> 80'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Scale multiple replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 replication controllers exist & 1
# replica each
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '1'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '1'
# Command
output_message=$(! kubectl scale --current-replicas=1 --replicas=2 -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are scaled to 2 replicas, and since busybox2 is malformed, it should error
kube::test::get_object_assert 'rc busybox0' "{{$rc_replicas_field}}" '2'
kube::test::get_object_assert 'rc busybox1' "{{$rc_replicas_field}}" '2'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Delete multiple busybox replication controllers recursively from directory of YAML files
# Pre-condition: busybox0 & busybox1 PODs exist
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
output_message=$(! kubectl delete -f hack/testdata/recursive/rc --recursive --grace-period=0 --force 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 replication controllers are deleted, and since busybox2 is malformed, it should error
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
### Rollout on multiple deployments recursively
# Pre-condition: no deployments exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create deployments (revision 1) recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
## Rollback the deployments to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/deployment --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should be a no-op, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Pause the deployments recursively
PRESERVE_ERR_FILE=true
kubectl-with-retry rollout pause -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to true, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "true:true:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Resume the deployments recursively
kubectl-with-retry rollout resume -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
output_message=$(cat ${ERROR_FILE})
# Post-condition: nginx0 & nginx1 should both have paused set to nothing, and since nginx2 is malformed, it should error
kube::test::get_object_assert deployment "{{range.items}}{{.spec.paused}}:{{end}}" "<no value>:<no value>:"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Retrieve the rollout history of the deployments recursively
output_message=$(! kubectl rollout history -f hack/testdata/recursive/deployment --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: nginx0 & nginx1 should both have a history, and since nginx2 is malformed, it should error
kube::test::if_has_string "${output_message}" "nginx0-deployment"
kube::test::if_has_string "${output_message}" "nginx1-deployment"
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
# Clean up
unset PRESERVE_ERR_FILE
rm "${ERROR_FILE}"
! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create replication controllers recursively from directory of YAML files
! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
## Attempt to rollback the replication controllers to revision 1 recursively
output_message=$(! kubectl rollout undo -f hack/testdata/recursive/rc --recursive --to-revision=1 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" 'no rollbacker has been implemented for {"" "ReplicationController"}'
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
## Attempt to pause the replication controllers recursively
output_message=$(! kubectl rollout pause -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" pausing is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" pausing is not supported'
## Attempt to resume the replication controllers recursively
output_message=$(! kubectl rollout resume -f hack/testdata/recursive/rc --recursive 2>&1 "${kube_flags[@]}")
# Post-condition: busybox0 & busybox1 should error as they are RC's, and since busybox2 is malformed, it should error
kube::test::if_has_string "${output_message}" "Object 'Kind' is missing"
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
# Clean up
! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
sleep 1
set +o nounset
set +o errexit
}
# run_namespace_tests exercises kubectl(v1:namespaces): it creates and
# deletes a namespace, and — when the pods resource is supported — creates,
# reads and deletes a pod inside a non-default namespace via --namespace=,
# the -n shorthand, and --all-namespaces.
# Globals read: kube_flags (array), id_field, pods.
run_namespace_tests() {
# Fail fast on errors/unset vars inside the test body; restored on exit below.
set -o nounset
set -o errexit
kube::log::status "Testing kubectl(v1:namespaces)"
### Create a new namespace
# Pre-condition: only the "default" namespace exists
# The Pre-condition doesn't hold anymore after we create and switch namespaces before creating pods with same name in the test.
# kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:'
# Command
kubectl create namespace my-namespace
# Post-condition: namespace 'my-namespace' is created.
kube::test::get_object_assert 'namespaces/my-namespace' "{{$id_field}}" 'my-namespace'
# Clean up
kubectl delete namespace my-namespace
######################
# Pods in Namespaces #
######################
if kube::test::if_supports_resource "${pods}" ; then
### Create a new namespace
# Pre-condition: the other namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace other
# Post-condition: namespace 'other' is created.
kube::test::get_object_assert 'namespaces/other' "{{$id_field}}" 'other'
### Create POD valid-pod in specific namespace
# Pre-condition: no POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create "${kube_flags[@]}" --namespace=other -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
# Post-condition: valid-pod POD is created
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: verify shorthand `-n other` has the same results as `--namespace=other`
kube::test::get_object_assert 'pods -n other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Post-condition: a resource cannot be retrieved by name across all namespaces
# The leading '!' asserts the command fails; stderr is merged so the error
# message can be checked below.
output_message=$(! kubectl get "${kube_flags[@]}" pod valid-pod --all-namespaces 2>&1)
kube::test::if_has_string "${output_message}" "a resource cannot be retrieved by name across all namespaces"
### Delete POD valid-pod in specific namespace
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''
# Clean up
kubectl delete namespace other
fi
set +o nounset
set +o errexit
}
# run_secrets_test covers kubectl secret handling in a dedicated
# "test-secrets" namespace: generic secrets (with custom --type),
# docker-registry secrets, tls secrets, stringData merge semantics,
# and go-template output of a created secret.
# Globals read: kube_flags (array), id_field, secret_type,
# WAIT_FOR_DELETION (optional; gates extra wait assertions).
run_secrets_test() {
# Fail fast on errors/unset vars inside the test body; restored on exit below.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing secrets"
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-secrets
# Post-condition: namespace 'test-secrets' is created.
kube::test::get_object_assert 'namespaces/test-secrets' "{{$id_field}}" 'test-secrets'
### Create a generic secret in a specific namespace
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret generic test-secret --from-literal=key1=value1 --type=test-type --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'test-type'
# 'dmFsdWUx' is base64("value1"): secret data is stored base64-encoded.
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep 'key1: dmFsdWUx')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a docker-registry secret in a specific namespace
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret docker-registry test-secret --docker-username=test-user --docker-password=test-password --docker-email='[email protected]' --namespace=test-secrets
# Post-condition: secret exists and has expected values
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/dockercfg'
[[ "$(kubectl get secret/test-secret --namespace=test-secrets -o yaml "${kube_flags[@]}" | grep '.dockercfg:')" ]]
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
### Create a tls secret
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no SECRET exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create secret tls test-secret --namespace=test-secrets --key=hack/testdata/tls.key --cert=hack/testdata/tls.crt
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$id_field}}" 'test-secret'
kube::test::get_object_assert 'secret/test-secret --namespace=test-secrets' "{{$secret_type}}" 'kubernetes.io/tls'
# Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Create a secret using stringData
# NOTE: the heredoc body below is the literal JSON sent to the server and
# must not be altered; "k2" is empty in data but set to "v2" in stringData.
kubectl create --namespace=test-secrets -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Secret",
"apiVersion": "v1",
"metadata": {
"name": "secret-string-data"
},
"data": {
"k1":"djE=",
"k2":""
},
"stringData": {
"k2":"v2"
}
}
__EOF__
# Post-condition: secret-string-data secret is created with expected data, merged/overridden data from stringData, and a cleared stringData field
# 'djE=' is base64("v1"); 'djI=' is base64("v2"), merged in from stringData.
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k1:djE=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.data}}' '.*k2:djI=.*'
kube::test::get_object_assert 'secret/secret-string-data --namespace=test-secrets ' '{{.stringData}}' '<no value>'
# Clean up
kubectl delete secret secret-string-data --namespace=test-secrets
### Create a secret using output flags
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
fi
# Pre-condition: no secret exists
kube::test::get_object_assert 'secrets --namespace=test-secrets' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
[[ "$(kubectl create secret generic test-secret --namespace=test-secrets --from-literal=key1=value1 --output=go-template --template=\"{{.metadata.name}}:\" | grep 'test-secret:')" ]]
## Clean-up
kubectl delete secret test-secret --namespace=test-secrets
# Clean up
kubectl delete namespace test-secrets
set +o nounset
set +o errexit
}
# run_configmap_tests covers kubectl configmap basics: creation from a
# YAML fixture, then creation from a --from-literal in a dedicated
# "test-configmaps" namespace, with verification of the stored key/value.
# Globals read: kube_flags (array), id_field.
run_configmap_tests() {
# Fail fast on errors/unset vars inside the test body; restored on exit below.
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing configmaps"
kubectl create -f test/fixtures/doc-yaml/user-guide/configmap/configmap.yaml
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}{{end}}" 'test-configmap'
kubectl delete configmap test-configmap "${kube_flags[@]}"
### Create a new namespace
# Pre-condition: the test-configmaps namespace does not exist
kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:' ':'
# Command
kubectl create namespace test-configmaps
# Post-condition: namespace 'test-configmaps' is created.
kube::test::get_object_assert 'namespaces/test-configmaps' "{{$id_field}}" 'test-configmaps'
### Create a generic configmap in a specific namespace
# Pre-condition: no configmaps namespace exists
kube::test::get_object_assert 'configmaps --namespace=test-configmaps' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
# Post-condition: configmap exists and has expected values
kube::test::get_object_assert 'configmap/test-configmap --namespace=test-configmaps' "{{$id_field}}" 'test-configmap'
# Configmap values appear unencoded in YAML output (plain 'key1: value1').
[[ "$(kubectl get configmap/test-configmap --namespace=test-configmaps -o yaml "${kube_flags[@]}" | grep 'key1: value1')" ]]
# Clean-up
kubectl delete configmap test-configmap --namespace=test-configmaps
kubectl delete namespace test-configmaps
set +o nounset
set +o errexit
}
# run_service_tests exercises kubectl(v1:services): create/describe/delete,
# `kubectl set selector` (local, dry-run and live variants, including the
# invalid flag/source combinations), re-creating a service from its dumped
# JSON, replace round-trip, custom-columns output, multi-service delete,
# and ExternalName service creation.
# Globals read: CONTEXT, kube_flags (array), id_field,
# service_selector_field, port_name, port_field, WAIT_FOR_DELETION (optional).
run_service_tests() {
# Fail fast on errors/unset vars inside the test body; restored on exit below.
set -o nounset
set -o errexit
# switch back to the default namespace
kubectl config set-context "${CONTEXT}" --namespace=""
kube::log::status "Testing kubectl(v1:services)"
### Create redis-master service from JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Describe command should print detailed information
kube::test::describe_object_assert services 'redis-master' "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_object_events_assert services 'redis-master'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert services 'redis-master' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert services 'redis-master' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert services "Name:" "Labels:" "Selector:" "IP:" "Port:" "Endpoints:" "Session Affinity:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert services
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert services false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert services true
### set selector
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Set selector of a local file without talking to the server
kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
# --dry-run combined with a file source must fail (note the leading '!').
! kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}"
# Set command to change the selector.
kubectl set selector -f examples/guestbook/redis-master-service.yaml role=padawan
# prove role=padawan
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "padawan:"
# Set command to reset the selector back to the original one.
kubectl set selector -f examples/guestbook/redis-master-service.yaml app=redis,role=master,tier=backend
# prove role=master
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Show dry-run works on running selector
kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}"
# --local combined with a live resource reference must fail (leading '!').
! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}"
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
### Dump current redis-master service
output_service=$(kubectl get service redis-master -o json --output-version=v1 "${kube_flags[@]}")
### Delete redis-master-service by id
# Pre-condition: redis-master service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create redis-master-service from dumped JSON
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
echo "${output_service}" | kubectl create -f - "${kube_flags[@]}"
# Post-condition: redis-master service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
### Create redis-master-v1-test service
# Pre-condition: redis-master-service service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:'
# Command
# NOTE: the heredoc body below is the literal JSON sent to the server and
# must not be altered.
kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "service-v1-test"
},
"spec": {
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 80
}
]
}
}
__EOF__
# Post-condition: service-v1-test service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
### Identity
# Round-trip: replacing a service with its own JSON dump must succeed.
kubectl get service "${kube_flags[@]}" service-v1-test -o json | kubectl replace "${kube_flags[@]}" -f -
### Delete services by id
# Pre-condition: service-v1-test exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:service-.*-test:'
# Command
kubectl delete service redis-master "${kube_flags[@]}"
kubectl delete service "service-v1-test" "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create two services
# Pre-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create -f examples/guestbook/redis-master-service.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/redis-slave-service.yaml "${kube_flags[@]}"
# Post-condition: redis-master and redis-slave services are created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
### Custom columns can be specified
# Pre-condition: generate output using custom columns
output_message=$(kubectl get services -o=custom-columns=NAME:.metadata.name,RSRC:.metadata.resourceVersion 2>&1 "${kube_flags[@]}")
# Post-condition: should contain name column
kube::test::if_has_string "${output_message}" 'redis-master'
### Delete multiple services at once
# Pre-condition: redis-master and redis-slave services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:redis-master:redis-slave:'
# Command
kubectl delete services redis-master redis-slave "${kube_flags[@]}" # delete multiple services at once
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
### Create an ExternalName service
# Pre-condition: Only the default kubernetes service exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
# Command
kubectl create service externalname beep-boop --external-name bar.com
# Post-condition: beep-boop service is created
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
### Delete beep-boop service by id
# Pre-condition: beep-boop service exists
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'beep-boop:kubernetes:'
# Command
kubectl delete service beep-boop "${kube_flags[@]}"
if [[ "${WAIT_FOR_DELETION:-}" == "true" ]]; then
kube::test::wait_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
fi
# Post-condition: Only the default kubernetes services exist
kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:'
set +o nounset
set +o errexit
}
run_rc_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:replicationcontrollers)"
### Create and stop controller, make sure it doesn't leak pods
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend controller
kube::test::get_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replication controller frontend from JSON
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rc 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rc 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rc 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rc 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rc "Name:" "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:" "GET_HOSTS_FROM:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rc
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rc false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rc true
### Scale replication controller frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with (wrong) current-replicas and replicas
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
### Scale replication controller frontend with replicas only
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
### Scale replication controller from JSON with replicas only
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl scale --replicas=2 -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Clean-up
kubectl delete rc frontend "${kube_flags[@]}"
### Scale multiple replication controllers
kubectl create -f examples/guestbook/legacy/redis-master-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Command
kubectl scale rc/redis-master rc/redis-slave --replicas=4 "${kube_flags[@]}"
# Post-condition: 4 replicas each
kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
# Clean-up
kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
### Scale a job
kubectl create -f test/fixtures/doc-yaml/user-guide/job.yaml "${kube_flags[@]}"
# Command
kubectl scale --replicas=2 job/pi
# Post-condition: 2 replicas for pi
kube::test::get_object_assert 'job pi' "{{$job_parallelism_field}}" '2'
# Clean-up
kubectl delete job/pi "${kube_flags[@]}"
### Scale a deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Command
kubectl scale --current-replicas=3 --replicas=1 deployment/nginx-deployment
# Post-condition: 1 replica for nginx-deployment
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '1'
# Clean-up
kubectl delete deployment/nginx-deployment "${kube_flags[@]}"
### Expose a deployment as a service
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'deployment nginx-deployment' "{{$deployment_replicas}}" '3'
# Command
kubectl expose deployment/nginx-deployment
# Post-condition: service exists and exposes deployment port (80)
kube::test::get_object_assert 'service nginx-deployment' "{{$port_field}}" '80'
# Clean-up
kubectl delete deployment/nginx-deployment service/nginx-deployment "${kube_flags[@]}"
### Expose replication controller as service
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl expose rc frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Command
kubectl expose service frontend --port=443 --name=frontend-2 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" '<no value> 443'
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
kubectl expose pod valid-pod --port=444 --name=frontend-3 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend-3' "{{$port_name}} {{$port_field}}" '<no value> 444'
# Create a service using service/v1 generator
kubectl expose rc frontend --port=80 --name=frontend-4 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-4' "{{$port_name}} {{$port_field}}" 'default 80'
# Verify that expose service works without specifying a port.
kubectl expose service frontend --name=frontend-5 "${kube_flags[@]}"
# Post-condition: service exists with the same port as the original service.
kube::test::get_object_assert 'service frontend-5' "{{$port_field}}" '80'
# Cleanup services
kubectl delete pod valid-pod "${kube_flags[@]}"
kubectl delete service frontend{,-2,-3,-4,-5} "${kube_flags[@]}"
### Expose negative invalid resource test
# Pre-condition: don't need
# Command
output_message=$(! kubectl expose nodes 127.0.0.1 2>&1 "${kube_flags[@]}")
# Post-condition: the error message has "cannot expose" string
kube::test::if_has_string "${output_message}" 'cannot expose'
### Try to generate a service with invalid name (exceeding maximum valid size)
# Pre-condition: use --name flag
output_message=$(! kubectl expose -f hack/testdata/pod-with-large-name.yaml --name=invalid-large-service-name-that-has-more-than-sixty-three-characters --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: should fail due to invalid name
kube::test::if_has_string "${output_message}" 'metadata.name: Invalid value'
# Pre-condition: default run without --name flag; should succeed by truncating the inherited name
output_message=$(kubectl expose -f hack/testdata/pod-with-large-name.yaml --port=8081 2>&1 "${kube_flags[@]}")
# Post-condition: inherited name from pod has been truncated
kube::test::if_has_string "${output_message}" '\"kubernetes-serve-hostname-testing-sixty-three-characters-in-len\" exposed'
# Clean-up
kubectl delete svc kubernetes-serve-hostname-testing-sixty-three-characters-in-len "${kube_flags[@]}"
### Expose multiport object as a new service
# Pre-condition: don't use --port flag
output_message=$(kubectl expose -f test/fixtures/doc-yaml/admin/high-availability/etcd.yaml --selector=test=etcd 2>&1 "${kube_flags[@]}")
# Post-condition: expose succeeded
kube::test::if_has_string "${output_message}" '\"etcd-server\" exposed'
# Post-condition: generated service has both ports from the exposed pod
kube::test::get_object_assert 'service etcd-server' "{{$port_name}} {{$port_field}}" 'port-1 2380'
kube::test::get_object_assert 'service etcd-server' "{{$second_port_name}} {{$second_port_field}}" 'port-2 2379'
# Clean-up
kubectl delete svc etcd-server "${kube_flags[@]}"
### Delete replication controller with id
# Pre-condition: frontend replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replication controllers
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl create -f examples/guestbook/legacy/redis-slave-controller.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple controllers at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rc frontend redis-slave "${kube_flags[@]}" # delete multiple controllers at once
# Post-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
### Auto scale replication controller
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, rc specified by file
kubectl autoscale -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, rc specified by name
kubectl autoscale rc frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rc frontend "${kube_flags[@]}"
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"
## Set resource limits/request of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Set resources of a local file without talking to the server
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's cpu limits
kubectl set resources deployment nginx-deployment-resources --limits=cpu=100m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set a non-existing container should fail
! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
# Set the limit of a specific container in deployment
kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set limits/requests of a deployment specified by a file
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Show dry-run works on running deployments
kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Clean up
kubectl delete deployment nginx-deployment-resources "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# run_deployment_tests exercises kubectl against Deployment resources:
#   - create via the old (extensions) and new (apps) generators
#   - validation of a spec containing a unix-style numeric user ID
#   - cascading vs. non-cascading (--cascade=false) deletion of owned rs
#   - autoscale (hpa creation)
#   - rollout undo / pause / resume and revision-history checks
#   - set image / set env on single and multiple containers
#   - deletion of a deployment stuck behind a pending initializer
# Globals (read): kube_flags, id_field, container_name_field, image_field0,
#   image_field1, rs_replicas_field, hpa_min_field, hpa_max_field,
#   hpa_cpu_field, IMAGE_DEPLOYMENT_R1, IMAGE_DEPLOYMENT_R2, IMAGE_PERL, SED
# Returns: 0 on success; any failed assertion aborts via errexit.
run_deployment_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing deployments"
# Test kubectl create deployment (using default - old generator)
kubectl create deployment test-nginx-extensions --image=gcr.io/google-containers/nginx:test-cmd
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
# and old generator was used, iow. old defaults are applied
output_message=$(kubectl get deployment.extensions/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_not_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1beta1'
# Clean up
kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
# Test kubectl create deployment
kubectl create deployment test-nginx-apps --image=gcr.io/google-containers/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
# Post-Condition: Deployment "nginx" is created.
kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
# and new generator was used, iow. new defaults are applied
output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
kube::test::if_has_string "${output_message}" '2'
# Ensure we can interact with deployments through extensions and apps endpoints
output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" 'apps/v1beta1'
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By" "Controlled By"
# Clean up
kubectl delete deployment test-nginx-apps "${kube_flags[@]}"
### Test kubectl create deployment should not fail validation
# Pre-Condition: No deployment exists.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
# Post-Condition: Deployment "deployment-with-unixuserid" is created.
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
# Clean up
kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"
### Test cascading deletion
## Test that rs is deleted when deployment is deleted.
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
# Deleting the deployment should delete the rs.
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
## Test that rs is not deleted when deployment is deleted with cascade set to false.
# Pre-condition: no deployment and rs exist
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create deployment
kubectl create deployment nginx-deployment --image=gcr.io/google-containers/nginx:test-cmd
# Wait for rs to come up.
kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Delete the deployment with cascade set to false.
kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
# Wait for the deployment to be deleted and then verify that rs is not
# deleted.
kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
# Cleanup
# Find the name of the rs to be deleted. Quote the template argument and the
# resulting name to avoid word-splitting/globbing (only one rs exists here).
output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template="{{range.items}}{{$id_field}}{{end}}")
kubectl delete rs "${output_message}" "${kube_flags[@]}"
### Auto scale deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
# Clean up
# Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
kubectl delete hpa nginx-deployment "${kube_flags[@]}"
kubectl delete deployment.extensions nginx-deployment "${kube_flags[@]}"
### Rollback a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a deployment (revision 1)
kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Update the deployment (revision 2)
kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd"
kube::test::get_object_assert deployment.extensions "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Rollback to revision 1
kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op
kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]}"
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}"
# A paused deployment cannot be rolled back
! kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}"
# The resumed deployment can now be rolled back
kubectl rollout undo deployment nginx "${kube_flags[@]}"
# Check that the new replica set has all old revisions stored in an annotation
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
! kubectl rollout status deployment/nginx --revision=3
cat hack/testdata/deployment-revision1.yaml | $SED "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}"
# Deletion of both deployments should not be blocked
kubectl delete deployment nginx2 "${kube_flags[@]}"
# Clean up
kubectl delete deployment nginx "${kube_flags[@]}"
### Set image of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set the deployment's image
kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set non-existing container should fail
! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}"
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a deployment specified by file
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of a local file without talking to the server
kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:"
# Set image of all containers of the deployment
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Set image of all containers of the deployment again when the image doesn't change
kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
### Set env of a deployment
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:'
kube::test::get_object_assert configmap "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-config:'
kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:'
# Set env of deployments for all container
kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}"
# Set env of deployments for specific container
kubectl set env deployment nginx-deployment env=prod -c=nginx "${kube_flags[@]}"
# Set env of deployments by configmap
kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}"
# Set env of deployments by secret
kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}"
# Remove specific env of deployment
kubectl set env deployment nginx-deployment env-
# Clean up
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete configmap test-set-env-config "${kube_flags[@]}"
kubectl delete secret test-set-env-secret "${kube_flags[@]}"
### Delete a deployment with initializer
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a deployment
# The create is expected to time out (the initializer is never satisfied),
# so swallow the failure with `|| true` and verify the object exists below.
kubectl create --request-timeout=1 -f hack/testdata/deployment-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert 'deployment web' "{{$id_field}}" 'web'
# Delete a deployment
kubectl delete deployment web "${kube_flags[@]}"
# Check Deployment web doesn't exist
output_message=$(! kubectl get deployment web 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" '"web" not found'
set +o nounset
set +o errexit
}
# run_rs_tests exercises kubectl against ReplicaSet resources:
#   - create / delete (cascading and --cascade=false) without leaking pods
#   - describe (object and resource forms, with/without events)
#   - scale with --current-replicas precondition
#   - expose as a service (unnamed port and service/v1 generator)
#   - bulk deletion of multiple replica sets
#   - deletion of an rs stuck behind a pending initializer
#   - autoscale (hpa), gated on apiserver support for horizontalpodautoscalers
# Globals (read): kube_flags, id_field, rs_replicas_field,
#   pod_container_name_field, port_name, port_field, hpa_min_field,
#   hpa_max_field, hpa_cpu_field, horizontalpodautoscalers
# Returns: 0 on success; any failed assertion aborts via errexit.
run_rs_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:replicasets)"
### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]}" --cascade=false
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Describe command should print detailed information
kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_object_events_assert rs 'frontend'
# Describe command should not print events information when show-events=false
kube::test::describe_object_events_assert rs 'frontend' false
# Describe command should print events information when show-events=true
kube::test::describe_object_events_assert rs 'frontend' true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
# Describe command should print events information by default
kube::test::describe_resource_events_assert rs
# Describe command should not print events information when show-events=false
kube::test::describe_resource_events_assert rs false
# Describe command should print events information when show-events=true
kube::test::describe_resource_events_assert rs true
# Describe command (resource only) should print detailed information
kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Created By" "Controlled By"
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
### Expose replica set as service
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Delete a rs with initializer
# Pre-condition: no rs exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Create a rs
# The create is expected to time out (the initializer is never satisfied),
# so swallow the failure with `|| true` and verify the object exists below.
kubectl create --request-timeout=1 -f hack/testdata/replicaset-with-initializer.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert 'rs nginx' "{{$id_field}}" 'nginx'
# Delete a rs
kubectl delete rs nginx "${kube_flags[@]}"
# check rs nginx doesn't exist
output_message=$(! kubectl get rs nginx 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" '"nginx" not found'
# Autoscale tests only run when the apiserver supports the HPA resource.
if kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then
### Auto scale replica set
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
! kubectl autoscale rs frontend "${kube_flags[@]}"
# Clean up
kubectl delete rs frontend "${kube_flags[@]}"
fi
set +o nounset
set +o errexit
}
# Verifies that `kubectl apply` of an unchanged DaemonSet manifest is
# idempotent: the controller-managed template generation must remain 1 after a
# second identical apply.
# Globals (read): kube_flags, id_field, template_generation_field
# Returns: 0 on success; the assertion helpers abort the test run on failure.
run_daemonset_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets)"
### Create a rolling update DaemonSet
# Pre-condition: no DaemonSet exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should be 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
# Re-apply the identical manifest; a no-op apply must not bump the generation.
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
# Template Generation should stay 1
kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Exercises `kubectl rollout undo` on a DaemonSet across two revisions and
# checks that ControllerRevisions record the --record annotation.  The expected
# container images come from the IMAGE_PAUSE_V2 / IMAGE_DAEMONSET_R2* env vars
# set by the test harness.
# Globals (read): kube_flags, id_field, annotations_field, image_field0,
#   image_field1, container_len, IMAGE_PAUSE_V2, IMAGE_DAEMONSET_R2,
#   IMAGE_DAEMONSET_R2_2
run_daemonset_history_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"
### Test rolling back a DaemonSet
# Pre-condition: no DaemonSet or its pods exists
kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a DaemonSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the DaemonSet (revision 2)
kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
# A failed rollback must leave the DaemonSet at its current revision.
kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo daemonset "${kube_flags[@]}"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up
kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
set +o nounset
set +o errexit
}
# Exercises `kubectl rollout undo` on a StatefulSet across two revisions,
# mirroring run_daemonset_history_tests.  Expected images come from the
# IMAGE_STATEFULSET_R1 / IMAGE_STATEFULSET_R2 / IMAGE_PAUSE_V2 env vars.
# Globals (read): kube_flags, id_field, annotations_field, image_field0,
#   image_field1, container_len, IMAGE_STATEFULSET_R1, IMAGE_STATEFULSET_R2,
#   IMAGE_PAUSE_V2
run_statefulset_history_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"
### Test rolling back a StatefulSet
# Pre-condition: no statefulset or its pods exists
kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create a StatefulSet (revision 1)
kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
# Rollback to revision 1 - should be no-op
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Update the statefulset (revision 2)
kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
# Rollback to revision 1 with dry-run - should be no-op
kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Rollback to revision 1
kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to revision 1000000 - should fail
output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
kube::test::if_has_string "${output_message}" "unable to find specified revision"
# A failed rollback must leave the StatefulSet at its current revision.
kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1"
# Rollback to last revision
kubectl rollout undo statefulset "${kube_flags[@]}"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:"
kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2"
# Clean up - delete newest configuration
kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}"
# Post-condition: no pods from statefulset controller
wait-for-pods-with-label "app=nginx-statefulset" ""
set +o nounset
set +o errexit
}
# Exercises create/get/describe/replace/edit/label/annotate/delete against
# manifests bundling multiple resources in one file — five variants:
#   1) YAML separated by ---       2) JSON List type
#   3) concatenated JSON objects   4) JSON ReplicationControllerList
#   5) JSON ServiceList
# plus creation of a multi-resource YAML directly from a URL.
# Globals (read): kube_flags, id_field, labels_field, annotations_field,
#   KUBE_TEMP, SED
# Fixes vs. previous revision: quoted all file-path expansions and redirect
# targets (word-splitting/glob hazards), replaced the word-split $FILES string
# with an array, quoted "$@" in the generated editor script, and removed a
# duplicated "# Command" comment.
run_multi_resources_tests() {
  set -o nounset
  set -o errexit
  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:multiple resources)"
  # Base names of the fixture files; the real extension (.yaml or .json) is
  # appended below depending on which variant exists on disk.
  local -a files=(
    hack/testdata/multi-resource-yaml
    hack/testdata/multi-resource-list
    hack/testdata/multi-resource-json
    hack/testdata/multi-resource-rclist
    hack/testdata/multi-resource-svclist
  )
  YAML=".yaml"
  JSON=".json"
  for file in "${files[@]}"; do
    if [[ -f "${file}${YAML}" ]]; then
      file=${file}${YAML}
      replace_file="${file%.yaml}-modify.yaml"
    else
      file=${file}${JSON}
      replace_file="${file%.json}-modify.json"
    fi
    has_svc=true
    has_rc=true
    two_rcs=false
    two_svcs=false
    # The rclist fixture holds two RCs and no service; svclist holds two
    # services and no RC.
    if [[ "${file}" == *rclist* ]]; then
      has_svc=false
      two_rcs=true
    fi
    if [[ "${file}" == *svclist* ]]; then
      has_rc=false
      two_svcs=true
    fi
    ### Create, get, describe, replace, label, annotate, and then delete service nginxsvc and replication controller my-nginx from 5 types of files:
    ### 1) YAML, separated by ---; 2) JSON, with a List type; 3) JSON, with JSON object concatenation
    ### 4) JSON, with a ReplicationControllerList type; 5) JSON, with a ServiceList type
    echo "Testing with file ${file} and replace with file ${replace_file}"
    # Pre-condition: no service (other than default kubernetes services) or replication controller exists
    kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
    kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
    # Command
    kubectl create -f "${file}" "${kube_flags[@]}"
    # Post-condition: mock service (and mock2) exists
    if [[ "${has_svc}" == true ]]; then
      if [[ "${two_svcs}" == true ]]; then
        kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
      else
        kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
      fi
    fi
    # Post-condition: mock rc (and mock2) exists
    if [[ "${has_rc}" == true ]]; then
      if [[ "${two_rcs}" == true ]]; then
        kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:mock2:'
      else
        kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
      fi
    fi
    # Command
    kubectl get -f "${file}" "${kube_flags[@]}"
    # Command: watching multiple resources should return "not supported" error
    WATCH_ERROR_FILE="${KUBE_TEMP}/kubectl-watch-error"
    kubectl get -f "${file}" "${kube_flags[@]}" "--watch" 2> "${WATCH_ERROR_FILE}" || true
    if ! grep -q "watch is only supported on individual resources and resource collections" "${WATCH_ERROR_FILE}"; then
      kube::log::error_exit "kubectl watch multiple resource returns unexpected error or non-error: $(cat "${WATCH_ERROR_FILE}")" "1"
    fi
    kubectl describe -f "${file}" "${kube_flags[@]}"
    # Command
    kubectl replace -f "${replace_file}" --force --cascade "${kube_flags[@]}"
    # Post-condition: mock service (and mock2) and mock rc (and mock2) are replaced
    if [[ "${has_svc}" == true ]]; then
      kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'replaced'
      if [[ "${two_svcs}" == true ]]; then
        kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'replaced'
      fi
    fi
    if [[ "${has_rc}" == true ]]; then
      kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'replaced'
      if [[ "${two_rcs}" == true ]]; then
        kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'replaced'
      fi
    fi
    # Command: kubectl edit multiple resources
    # The fake $EDITOR is a generated script that flips status: replaced ->
    # status: edited in every file kubectl hands it.
    temp_editor="${KUBE_TEMP}/tmp-editor.sh"
    echo -e "#!/bin/bash\n$SED -i \"s/status\:\ replaced/status\:\ edited/g\" \"\$@\"" > "${temp_editor}"
    chmod +x "${temp_editor}"
    EDITOR="${temp_editor}" kubectl edit "${kube_flags[@]}" -f "${file}"
    # Post-condition: mock service (and mock2) and mock rc (and mock2) are edited
    if [[ "${has_svc}" == true ]]; then
      kube::test::get_object_assert 'services mock' "{{${labels_field}.status}}" 'edited'
      if [[ "${two_svcs}" == true ]]; then
        kube::test::get_object_assert 'services mock2' "{{${labels_field}.status}}" 'edited'
      fi
    fi
    if [[ "${has_rc}" == true ]]; then
      kube::test::get_object_assert 'rc mock' "{{${labels_field}.status}}" 'edited'
      if [[ "${two_rcs}" == true ]]; then
        kube::test::get_object_assert 'rc mock2' "{{${labels_field}.status}}" 'edited'
      fi
    fi
    # cleaning
    rm "${temp_editor}"
    # Command
    # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl label"
    # fails on some, but not all, of the resources, retries will fail because it tries to modify
    # existing labels.
    kubectl-with-retry label -f "${file}" labeled=true --overwrite "${kube_flags[@]}"
    # Post-condition: mock service and mock rc (and mock2) are labeled
    if [[ "${has_svc}" == true ]]; then
      kube::test::get_object_assert 'services mock' "{{${labels_field}.labeled}}" 'true'
      if [[ "${two_svcs}" == true ]]; then
        kube::test::get_object_assert 'services mock2' "{{${labels_field}.labeled}}" 'true'
      fi
    fi
    if [[ "${has_rc}" == true ]]; then
      kube::test::get_object_assert 'rc mock' "{{${labels_field}.labeled}}" 'true'
      if [[ "${two_rcs}" == true ]]; then
        kube::test::get_object_assert 'rc mock2' "{{${labels_field}.labeled}}" 'true'
      fi
    fi
    # Command
    # We need to set --overwrite, because otherwise, if the first attempt to run "kubectl annotate"
    # fails on some, but not all, of the resources, retries will fail because it tries to modify
    # existing annotations.
    kubectl-with-retry annotate -f "${file}" annotated=true --overwrite "${kube_flags[@]}"
    # Post-condition: mock service (and mock2) and mock rc (and mock2) are annotated
    if [[ "${has_svc}" == true ]]; then
      kube::test::get_object_assert 'services mock' "{{${annotations_field}.annotated}}" 'true'
      if [[ "${two_svcs}" == true ]]; then
        kube::test::get_object_assert 'services mock2' "{{${annotations_field}.annotated}}" 'true'
      fi
    fi
    if [[ "${has_rc}" == true ]]; then
      kube::test::get_object_assert 'rc mock' "{{${annotations_field}.annotated}}" 'true'
      if [[ "${two_rcs}" == true ]]; then
        kube::test::get_object_assert 'rc mock2' "{{${annotations_field}.annotated}}" 'true'
      fi
    fi
    # Cleanup resources created
    kubectl delete -f "${file}" "${kube_flags[@]}"
  done
  #############################
  # Multiple Resources via URL#
  #############################
  # Pre-condition: no service (other than default kubernetes services) or replication controller exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
  # Post-condition: service(mock) and rc(mock) exist
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'mock:'
  # Clean up
  kubectl delete -f https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/testdata/multi-resource-yaml.yaml "${kube_flags[@]}"
  # Post-condition: no service (other than default kubernetes services) or replication controller exists
  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
  kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
  set +o nounset
  set +o errexit
}
# Verifies `kubectl config set` for certificate-authority-data: writing raw
# bytes with --set-raw-bytes and writing the equivalent pre-base64-encoded
# string must produce identical stored values in the kubeconfig.
# Globals (read): TMPDIR (falls back to /tmp for the apiserver cert)
# Fixes vs. previous revision: corrected the misspelled locals
# r_writen/e_writen -> r_written/e_written and replaced the non-portable
# `test "$a" == "$b"` with `[[ ... == ... ]]`.
run_kubectl_config_set_tests() {
  set -o nounset
  set -o errexit
  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:config set)"
  kubectl config set-cluster test-cluster --server="https://does-not-work"
  # Get the api cert and add a comment to avoid flag parsing problems
  cert_data=$(echo "#Comment" && cat "${TMPDIR:-/tmp}/apiserver.crt")
  # Path 1: pass raw bytes; kubectl encodes them before storing.
  kubectl config set clusters.test-cluster.certificate-authority-data "$cert_data" --set-raw-bytes
  r_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
  # Path 2: pass the same data already base64-encoded.
  encoded=$(echo -n "$cert_data" | base64)
  kubectl config set clusters.test-cluster.certificate-authority-data "$encoded"
  e_written=$(kubectl config view --raw -o jsonpath='{.clusters[?(@.name == "test-cluster")].cluster.certificate-authority-data}')
  # Both write paths must have stored the identical value.
  [[ "$e_written" == "$r_written" ]]
  set +o nounset
  set +o errexit
}
# Smoke-tests `kubectl proxy`: checks HTTP status codes for the UI redirect,
# core API paths, and optional /metrics and /static endpoints, both with the
# default prefix and with a custom --api-prefix (/custom).
# Globals (read): metrics, static - resource names gated by
#   kube::test::if_supports_resource
# Relies on start-proxy / stop-proxy / check-curl-proxy-code helpers defined
# elsewhere in the harness.
run_kubectl_local_proxy_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing kubectl local proxy"
# Make sure the UI can be proxied
start-proxy
# /ui is expected to issue a 307 redirect; /api/ui must not exist (404).
check-curl-proxy-code /ui 307
check-curl-proxy-code /api/ui 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
fi
if kube::test::if_supports_resource "${static}" ; then
check-curl-proxy-code /static/ 200
fi
stop-proxy
# Make sure the in-development api is accessible by default
start-proxy
check-curl-proxy-code /apis 200
check-curl-proxy-code /apis/extensions/ 200
stop-proxy
# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/ui 307
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
check-curl-proxy-code /custom/api/v1/namespaces 200
stop-proxy
set +o nounset
set +o errexit
}
# Verifies that requesting an unknown resource type produces the RESTMapper's
# "doesn't have a resource type" error rather than some other failure.
# Globals (read): kube_flags, KUBE_TEMP
# Fixes vs. previous revision: quoted the stderr redirect target and the
# $(cat ...) arguments (word-splitting hazard) and fixed a comment typo.
run_RESTMapper_evaluation_tests() {
  set -o nounset
  set -o errexit
  create_and_use_new_namespace
  kube::log::status "Testing RESTMapper"
  RESTMAPPER_ERROR_FILE="${KUBE_TEMP}/restmapper-error"
  ### Non-existent resource type should give a recognizable error
  # Pre-condition: None
  # Command
  kubectl get "${kube_flags[@]}" unknownresourcetype 2> "${RESTMAPPER_ERROR_FILE}" || true
  if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then
    kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat "${RESTMAPPER_ERROR_FILE}")"
  else
    kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat "${RESTMAPPER_ERROR_FILE}")"
    exit 1
  fi
  rm "${RESTMAPPER_ERROR_FILE}"
  # Post-condition: None
  set +o nounset
  set +o errexit
}
# Exercises `kubectl create clusterrole/clusterrolebinding/rolebinding` and
# `kubectl set subject`, asserting the generated rules (verbs, resources,
# apiGroups, resourceNames, nonResourceURLs) and subjects via go-templates.
# Globals (read): kube_flags
run_clusterroles_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing clusterroles"
# make sure the server was properly bootstrapped with clusterroles and bindings
kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'
# test `kubectl create clusterrole`
kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
# Note: '*' is regex-escaped in the expected value because the assert helper
# treats the expectation as a pattern.
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
# Two resources in different API groups produce two rules, so the verb list
# appears twice.
kubectl create "${kube_flags[@]}" clusterrole resource-reader --verb=get,list --resource=pods,deployments.extensions
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:deployments:'
kube::test::get_object_assert clusterrole/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
kubectl create "${kube_flags[@]}" clusterrole resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert clusterrole/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
kubectl create "${kube_flags[@]}" clusterrole url-reader --verb=get --non-resource-url=/logs/* --non-resource-url=/healthz/*
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:'
kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:'
# test `kubectl create rolebinding/clusterrolebinding`
# test `kubectl set subject rolebinding/clusterrolebinding`
kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# `set subject` appends rather than replaces: both subjects must be present.
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-group --clusterrole=admin --group=the-group
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-group --group=foo
kube::test::get_object_assert clusterrolebinding/super-group "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" clusterrolebinding super-sa --clusterrole=admin --serviceaccount=otherns:sa-name
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" clusterrolebinding super-sa --serviceaccount=otherfoo:foo
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert clusterrolebinding/super-sa "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'
kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
kubectl set subject "${kube_flags[@]}" rolebinding admin --user=foo
kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:foo:'
kubectl create "${kube_flags[@]}" rolebinding localrole --role=localrole --group=the-group
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:'
kubectl set subject "${kube_flags[@]}" rolebinding localrole --group=foo
kube::test::get_object_assert rolebinding/localrole "{{range.subjects}}{{.name}}:{{end}}" 'the-group:foo:'
kubectl create "${kube_flags[@]}" rolebinding sarole --role=localrole --serviceaccount=otherns:sa-name
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:'
kubectl set subject "${kube_flags[@]}" rolebinding sarole --serviceaccount=otherfoo:foo
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.namespace}}:{{end}}" 'otherns:otherfoo:'
kube::test::get_object_assert rolebinding/sarole "{{range.subjects}}{{.name}}:{{end}}" 'sa-name:foo:'
set +o nounset
set +o errexit
}
# Exercises `kubectl create role` across the resource-specifier grammar:
# bare resource, resource.group, resource/subresource, resource.group/sub,
# --resource-name, and comma-separated multi-resource lists; also asserts
# the error paths for unknown resources/groups.
# Globals (read): kube_flags
run_role_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing role"
# Create Role from command (only resource)
kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
# Unknown resource must be rejected by discovery.
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-pod-admin --verb=* --resource=invalid-resource 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"invalid-resource\""
# Create Role from command (resource + group)
kubectl create "${kube_flags[@]}" role group-reader --verb=get,list --resource=deployments.extensions
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'deployments:'
kube::test::get_object_assert role/group-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=deployments.invalid-group 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"deployments\" in group \"invalid-group\""
# Create Role from command (resource / subresource)
kubectl create "${kube_flags[@]}" role subresource-reader --verb=get,list --resource=pods/status
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:'
kube::test::get_object_assert role/subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
# Create Role from command (resource + group / subresource)
kubectl create "${kube_flags[@]}" role group-subresource-reader --verb=get,list --resource=replicasets.extensions/scale
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'replicasets/scale:'
kube::test::get_object_assert role/group-subresource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" 'extensions:'
output_message=$(! kubectl create "${kube_flags[@]}" role invalid-group --verb=get,list --resource=rs.invalid-group/scale 2>&1)
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type \"rs\" in group \"invalid-group\""
# Create Role from command (resource + resourcename)
kubectl create "${kube_flags[@]}" role resourcename-reader --verb=get,list --resource=pods --resource-name=foo
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods:'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':'
kube::test::get_object_assert role/resourcename-reader "{{range.rules}}{{range.resourceNames}}{{.}}:{{end}}{{end}}" 'foo:'
# Create Role from command (multi-resources)
# Two resources in different groups yield two rules, hence doubled verb list.
kubectl create "${kube_flags[@]}" role resource-reader --verb=get,list --resource=pods/status,deployments.extensions
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:list:get:list:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.resources}}{{.}}:{{end}}{{end}}" 'pods/status:deployments:'
kube::test::get_object_assert role/resource-reader "{{range.rules}}{{range.apiGroups}}{{.}}:{{end}}{{end}}" ':extensions:'
set +o nounset
set +o errexit
}
# Verifies that the discovery document (/api/v1) exports short names,
# using the "cm" short name for configmaps as the probe.
# Globals (read): none beyond the kubectl context set up by the harness
run_assert_short_name_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing assert short name"
kube::log::status "Testing propagation of short names for resources"
output_message=$(kubectl get --raw=/api/v1)
## test if a short name is exported during discovery
# The expectation is matched against the raw JSON discovery payload; brackets
# are regex-escaped because the helper treats the expectation as a pattern.
kube::test::if_has_string "${output_message}" '{"name":"configmaps","singularName":"","namespaced":true,"kind":"ConfigMap","verbs":\["create","delete","deletecollection","get","list","patch","update","watch"\],"shortNames":\["cm"\]}'
set +o nounset
set +o errexit
}
# Verifies that discovery exports resource categories: the pods entry in
# /api/v1 must advertise the "all" category.
# NOTE(review): `grep -Po` requires GNU grep (PCRE support); this test is
# presumably Linux-only — confirm before running on BSD/macOS.
run_assert_categories_tests() {
set -o nounset
set -o errexit
kube::log::status "Testing propagation of categories for resources"
# Extract just the pods resource object from the discovery JSON.
output_message=$(kubectl get --raw=/api/v1 | grep -Po '"name":"pods".*?}')
kube::test::if_has_string "${output_message}" '"categories":\["all"\]'
set +o nounset
set +o errexit
}
# Verifies `kubectl create` error handling: no arguments is an error, and an
# RC manifest containing an empty string list must produce a validation error
# (not a panic).
# Globals (read): kube_flags, KUBE_TEMP
# Fixes vs. previous revision: both status messages had an unbalanced escaped
# quote (opening \" with no closing one); the $(cat ...) argument is now
# quoted against word-splitting.
run_kubectl_create_error_tests() {
  set -o nounset
  set -o errexit
  create_and_use_new_namespace
  kube::log::status "Testing kubectl create with error"
  # Passing no arguments to create is an error
  ! kubectl create
  ## kubectl create should not panic on empty string lists in a template
  ERROR_FILE="${KUBE_TEMP}/validation-error"
  kubectl create -f hack/testdata/invalid-rc-with-empty-args.yaml "${kube_flags[@]}" 2> "${ERROR_FILE}" || true
  # Post-condition: should get an error reporting the empty string
  if grep -q "unknown object type \"nil\" in ReplicationController" "${ERROR_FILE}"; then
    kube::log::status "\"kubectl create\" with empty string list returns error as expected: $(cat "${ERROR_FILE}")"
  else
    kube::log::status "\"kubectl create\" with empty string list returns unexpected error or non-error: $(cat "${ERROR_FILE}")"
    exit 1
  fi
  rm "${ERROR_FILE}"
  set +o nounset
  set +o errexit
}
# Verifies `kubectl run --image` validation: a syntactically valid image
# reference creates a deployment; a name violating the image reference
# format (uppercase letters) is rejected with a clear error.
run_cmd_with_img_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing cmd with image"
# Test that a valid image reference value is provided as the value of --image in `kubectl run <name> --image`
output_message=$(kubectl run test1 --image=validname)
kube::test::if_has_string "${output_message}" 'deployment "test1" created'
kubectl delete deployments test1
# test invalid image name
# Uppercase characters are not allowed in an image repository name.
output_message=$(! kubectl run test2 --image=InvalidImageName 2>&1)
kube::test::if_has_string "${output_message}" 'error: Invalid image name "InvalidImageName": invalid reference format'
set +o nounset
set +o errexit
}
# Verifies kubeconfig/context/cluster/user error handling on the client side:
# missing kubeconfig files, nonexistent contexts/clusters/users, and a
# malformed config file must all yield the expected error strings.
# Globals (read): KUBE_OUTPUT_HOSTBIN - path to the built kubectl binary,
#   TMPDIR (falls back to /tmp)
run_client_config_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing client config"
# Command
# Pre-condition: kubeconfig "missing" is not a file or directory
output_message=$(! kubectl get pod --context="" --kubeconfig=missing 2>&1)
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: kubeconfig "missing" is not a file or directory
# Command
output_message=$(! kubectl get pod --user="" --kubeconfig=missing 2>&1)
# Post-condition: --user contains a valid / empty value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Command
output_message=$(! kubectl get pod --cluster="" --kubeconfig=missing 2>&1)
# Post-condition: --cluster contains a "valid" value, missing config file returns error
kube::test::if_has_string "${output_message}" "missing: no such file or directory"
# Pre-condition: context "missing-context" does not exist
# Command
output_message=$(! kubectl get pod --context="missing-context" 2>&1)
kube::test::if_has_string "${output_message}" 'context "missing-context" does not exist'
# Post-condition: invalid or missing context returns error
# Pre-condition: cluster "missing-cluster" does not exist
# Command
output_message=$(! kubectl get pod --cluster="missing-cluster" 2>&1)
kube::test::if_has_string "${output_message}" 'cluster "missing-cluster" does not exist'
# Post-condition: invalid or missing cluster returns error
# Pre-condition: user "missing-user" does not exist
# Command
output_message=$(! kubectl get pod --user="missing-user" 2>&1)
kube::test::if_has_string "${output_message}" 'auth info "missing-user" does not exist'
# Post-condition: invalid or missing user returns error
# test invalid config
# Corrupt the apiVersion field of the current config and point the freshly
# built kubectl binary at the resulting file.
kubectl config view | sed -E "s/apiVersion: .*/apiVersion: v-1/g" > "${TMPDIR:-/tmp}"/newconfig.yaml
output_message=$(! "${KUBE_OUTPUT_HOSTBIN}/kubectl" get pods --context="" --user="" --kubeconfig="${TMPDIR:-/tmp}"/newconfig.yaml 2>&1)
kube::test::if_has_string "${output_message}" "Error loading config file"
output_message=$(! kubectl get pod --kubeconfig=missing-config 2>&1)
kube::test::if_has_string "${output_message}" 'no such file or directory'
set +o nounset
set +o errexit
}
# Verifies serviceaccount create/delete in a dedicated namespace,
# cleaning up both the service account and the namespace afterwards.
run_service_accounts_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing service accounts"

  ### Create a new namespace
  # Pre-condition: the test-service-accounts namespace does not exist
  kube::test::get_object_assert 'namespaces' '{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:' ':'
  # Command
  kubectl create namespace test-service-accounts
  # Post-condition: namespace 'test-service-accounts' is created.
  kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts'

  ### Create a service account in a specific namespace
  # Command
  kubectl create serviceaccount test-service-account --namespace=test-service-accounts
  # Post-condition: the service account exists with the expected name
  kube::test::get_object_assert 'serviceaccount/test-service-account --namespace=test-service-accounts' "{{$id_field}}" 'test-service-account'

  # Clean-up the service account, then the namespace itself
  kubectl delete serviceaccount test-service-account --namespace=test-service-accounts
  kubectl delete namespace test-service-accounts

  set +o nounset
  set +o errexit
}
# Verifies podtemplate create / get / print / delete round-trips.
run_pod_templates_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing pod templates"

  ### Create PODTEMPLATE
  # Pre-condition: no PODTEMPLATE
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" ''
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/walkthrough/podtemplate.json "${kube_flags[@]}"
  # Post-condition: nginx PODTEMPLATE is available
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'

  ### Printing pod templates works
  kubectl get podtemplates "${kube_flags[@]}"
  # Assert the yaml output mentions nginx; grep -q exits non-zero (and thus
  # fails the test under errexit) when absent, replacing the old
  # [[ "$(... | grep ...)" ]] anti-pattern with the idiomatic form.
  kubectl get podtemplates -o yaml "${kube_flags[@]}" | grep -q nginx

  ### Delete nginx pod template by name
  # Pre-condition: nginx pod template is available
  kube::test::get_object_assert podtemplates "{{range.items}}{{.metadata.name}}:{{end}}" 'nginx:'
  # Command
  kubectl delete podtemplate nginx "${kube_flags[@]}"
  # Post-condition: No templates exist
  kube::test::get_object_assert podtemplate "{{range.items}}{{.metadata.name}}:{{end}}" ''

  set +o nounset
  set +o errexit
}
# Verifies statefulset creation, scaling with --current-replicas, and that
# deletion does not leak pods. Relies on $statefulset_replicas_field and
# $statefulset_observed_generation set by runTests.
run_stateful_set_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets)"

  ### Create and stop statefulset, make sure it doesn't leak pods
  # Pre-condition: no statefulset exists
  kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create statefulset
  kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"

  ### Scale statefulset test with current-replicas and replicas
  # Pre-condition: 0 replicas, observedGeneration settles at 1
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '0'
  kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '1'
  # Command: Scale up (--current-replicas makes the scale conditional)
  kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]}"
  # Post-condition: 1 replica, named nginx-0; generation bumped to 2
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_replicas_field}}" '1'
  kube::test::wait_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '2'
  # Typically we'd wait and confirm that N>1 replicas are up, but this framework
  # doesn't start the scheduler, so pet-0 will block all others.
  # TODO: test robust scaling in an e2e.
  wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"

  ### Clean up
  kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}"
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit
}
# Verifies that a v1 List manifest containing objects of multiple API
# versions can be created and its members deleted individually.
run_lists_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:lists)"

  ### Create a List with objects from multiple versions
  # Command
  kubectl create -f hack/testdata/list.yaml "${kube_flags[@]}"

  ### Delete the List with objects from multiple versions
  # Command
  kubectl delete service/list-service-test deployment/list-deployment-test

  set +o nounset
  set +o errexit
}
# Verifies create/delete round-trips for the example PersistentVolume
# manifests (local and GCE).
run_persistent_volumes_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing persistent volumes"

  ### Create and delete persistent volume examples
  # Pre-condition: no persistent volumes currently exist
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: for each example manifest (file:expected-pv-name pair), create
  # the PV, assert it is the only one present, then delete it again. This
  # replaces three copy-pasted stanzas with one data-driven loop.
  local spec file name
  for spec in local-01.yaml:pv0001 local-02.yaml:pv0002 gce.yaml:pv0003; do
    file=${spec%%:*}
    name=${spec##*:}
    kubectl create -f "test/fixtures/doc-yaml/user-guide/persistent-volumes/volumes/${file}" "${kube_flags[@]}"
    kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" "${name}:"
    kubectl delete pv "${name}" "${kube_flags[@]}"
  done
  # Post-condition: no PVs
  kube::test::get_object_assert pv "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}
# Verifies create/delete round-trips for the example PersistentVolumeClaim
# manifests (both yaml and json fixtures).
run_persistent_volume_claims_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing persistent volumes claims"

  ### Create and delete persistent volume claim examples
  # Pre-condition: no persistent volume claims currently exist
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: for each example manifest (file:expected-claim-name pair),
  # create the PVC, assert it is the only one present, then delete it.
  # This replaces three copy-pasted stanzas with one data-driven loop.
  local spec file name
  for spec in claim-01.yaml:myclaim-1 claim-02.yaml:myclaim-2 claim-03.json:myclaim-3; do
    file=${spec%%:*}
    name=${spec##*:}
    kubectl create -f "test/fixtures/doc-yaml/user-guide/persistent-volumes/claims/${file}" "${kube_flags[@]}"
    kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" "${name}:"
    kubectl delete pvc "${name}" "${kube_flags[@]}"
  done
  # Post-condition: no PVCs
  kube::test::get_object_assert pvc "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}
# Verifies StorageClass create/get (including the "sc" short name) and
# delete. The manifest is fed to kubectl via a heredoc on stdin.
run_storage_class_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing storage class"

  ### Create and delete storage class
  # Pre-condition: no storage classes currently exist
  kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command: create a StorageClass from inline JSON (heredoc body must stay
  # flush-left so __EOF__ terminates it)
  kubectl create -f - "${kube_flags[@]}" << __EOF__
{
"kind": "StorageClass",
"apiVersion": "storage.k8s.io/v1",
"metadata": {
"name": "storage-class-name"
},
"provisioner": "kubernetes.io/fake-provisioner-type",
"parameters": {
"zone":"us-east-1b",
"type":"ssd"
}
}
__EOF__
  # Post-condition: visible under both the full name and the "sc" short name
  kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
  kube::test::get_object_assert sc "{{range.items}}{{$id_field}}:{{end}}" 'storage-class-name:'
  kubectl delete storageclass storage-class-name "${kube_flags[@]}"
  # Post-condition: no storage classes
  kube::test::get_object_assert storageclass "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}
# Verifies node get/describe output (with and without events), marking a
# node unschedulable via patch, and the webhook token-review endpoints.
run_nodes_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl(v1:nodes)"

  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
  kube::test::describe_object_assert nodes "127.0.0.1" "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert nodes "127.0.0.1"
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert nodes "127.0.0.1" false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert nodes "127.0.0.1" true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert nodes "Name:" "Labels:" "CreationTimestamp:" "Conditions:" "Addresses:" "Capacity:" "Pods:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert nodes
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert nodes false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert nodes true

  ### kubectl patch update can mark node unschedulable
  # Pre-condition: node is schedulable (field unset -> go-template '<no value>')
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":true}}'
  # Post-condition: node is unschedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
  # patching to null clears the field again
  kubectl patch "${kube_flags[@]}" nodes "127.0.0.1" -p='{"spec":{"unschedulable":null}}'
  # Post-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  # check webhook token authentication endpoint, kubectl doesn't actually display the returned object so this isn't super useful
  # but it proves that works
  kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1beta1.json --validate=false
  kubectl create -f test/fixtures/pkg/kubectl/cmd/create/tokenreview-v1.json --validate=false

  set +o nounset
  set +o errexit
}
# Verifies the SubjectAccessReview endpoints: creates the fixtures through
# kubectl, then POSTs them directly at each API version and requires an
# allowed review in the response.
run_authorization_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing authorization"

  # check remote authorization endpoint, kubectl doesn't actually display the returned object so this isn't super useful
  # but it proves that works
  kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1.json --validate=false
  kubectl create -f test/fixtures/pkg/kubectl/cmd/create/sar-v1beta1.json --validate=false

  # POST the per-version fixture at the matching subjectaccessreviews
  # endpoint; the response must contain '"allowed": true'. One loop replaces
  # the previously duplicated v1beta1/v1 stanzas.
  local version SAR_RESULT_FILE
  for version in v1beta1 v1; do
    SAR_RESULT_FILE="${KUBE_TEMP}/sar-result.json"
    curl -k -H "Content-Type:" "http://localhost:8080/apis/authorization.k8s.io/${version}/subjectaccessreviews" -XPOST -d "@test/fixtures/pkg/kubectl/cmd/create/sar-${version}.json" > "${SAR_RESULT_FILE}"
    if grep -q '"allowed": true' "${SAR_RESULT_FILE}"; then
      kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" returns as expected: $(cat "${SAR_RESULT_FILE}")"
    else
      kube::log::status "\"authorization.k8s.io/subjectaccessreviews\" does not return as expected: $(cat "${SAR_RESULT_FILE}")"
      exit 1
    fi
    rm "${SAR_RESULT_FILE}"
  done

  set +o nounset
  set +o errexit
}
# Verifies that a single `kubectl get` can retrieve multiple heterogeneous
# resources (a node and a service) in one request.
run_retrieve_multiple_tests() {
  set -o nounset
  set -o errexit

  # switch back to the default namespace
  kubectl config set-context "${CONTEXT}" --namespace=""
  kube::log::status "Testing kubectl(v1:multiget)"
  kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:'

  set +o nounset
  set +o errexit
}
# Verifies the "all" resource alias: creates a cassandra RC + service and
# expects `get all -l app=cassandra` to return the labelled objects.
run_resource_aliasing_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing resource aliasing"
  kubectl create -f examples/storage/cassandra/cassandra-controller.yaml "${kube_flags[@]}"
  kubectl create -f examples/storage/cassandra/cassandra-service.yaml "${kube_flags[@]}"

  # NOTE(review): the embedded single quotes suggest get_object_assert
  # re-evaluates its first argument as a command line — confirm before
  # changing the quoting here.
  object="all -l'app=cassandra'"
  request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"

  # all 4 cassandra's might not be in the request immediately...
  # (accept 2, 3 or 4 labelled objects to tolerate controller lag)
  kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:cassandra:' || \
  kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:cassandra:' || \
  kube::test::get_object_assert "$object" "$request" 'cassandra:cassandra:'

  kubectl delete all -l app=cassandra "${kube_flags[@]}"

  set +o nounset
  set +o errexit
}
# Verifies `kubectl explain` works for full resource names, short names,
# and dotted field paths.
run_kubectl_explain_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl(v1:explain)"
  kubectl explain pods
  # shortcuts work
  kubectl explain po
  kubectl explain po.status.message

  set +o nounset
  set +o errexit
}
# Verifies the /swaggerapi endpoint serves a v1 schema containing a few
# expected marker phrases.
run_swagger_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing swagger"

  # Verify schema
  file="${KUBE_TEMP}/schema-v1.json"
  curl -s "http://127.0.0.1:${API_PORT}/swaggerapi/api/v1" > "${file}"
  # Spot-check phrases that must appear in the swagger document; grep -q
  # exits non-zero (failing the test under errexit) when a phrase is
  # missing, replacing the old [[ "$(grep ...)" ]] anti-pattern.
  grep -q "list of returned" "${file}"
  grep -q "List of services" "${file}"
  grep -q "Watch for changes to the described resources" "${file}"

  set +o nounset
  set +o errexit
}
# Verifies `kubectl get --sort-by`: no panic on empty result sets, works on
# a single pod, and orders correctly by name and by label.
run_kubectl_sort_by_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl --sort-by"

  ### sort-by should not panic if no pod exists
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl get pods --sort-by="{metadata.name}"
  kubectl get pods --sort-by="{metadata.creationTimestamp}"

  ### sort-by should work if a pod exists
  # Create POD
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Check output of sort-by
  output_message=$(kubectl get pods --sort-by="{metadata.name}")
  kube::test::if_has_string "${output_message}" "valid-pod"
  ### Clean up
  # Pre-condition: valid-pod exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
  # Post-condition: valid-pod doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  ### sort-by should work by sorting by name
  # Create three PODs
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod1.yaml
  # Post-condition: sorted-pod1 is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:'
  # Command
  kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod2.yaml
  # Post-condition: sorted-pod2 is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:'
  # Command
  kubectl create "${kube_flags[@]}" -f hack/testdata/sorted-pods/sorted-pod3.yaml
  # Post-condition: sorted-pod3 is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
  # Check output of sort-by '{metadata.name}'
  output_message=$(kubectl get pods --sort-by="{metadata.name}")
  kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod1:sorted-pod2:sorted-pod3:"
  # Check output of sort-by '{metadata.labels.name}' (labels are in the
  # reverse order of the pod names, so the sorted output flips)
  output_message=$(kubectl get pods --sort-by="{metadata.labels.name}")
  kube::test::if_sort_by_has_correct_order "${output_message}" "sorted-pod3:sorted-pod2:sorted-pod1:"
  ### Clean up
  # Pre-condition: the three sorted-pods exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'sorted-pod1:sorted-pod2:sorted-pod3:'
  # Command
  kubectl delete "${kube_flags[@]}" pod --grace-period=0 --force --all
  # Post-condition: no pods exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}
# Verifies that --all-namespaces takes precedence over an explicit
# --namespace flag in the same `kubectl get` invocation.
run_kubectl_all_namespace_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl --all-namespace"

  # Pre-condition: the "default" namespace exists
  kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'

  ### Create POD
  # Pre-condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
  # Command
  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
  # Post-condition: valid-pod is created
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'

  ### Verify a specific namespace is ignored when all-namespaces is provided
  # Command
  kubectl get pods --all-namespaces --namespace=default

  ### Clean up
  # Pre-condition: valid-pod exists
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
  # Command
  kubectl delete "${kube_flags[@]}" pod valid-pod --grace-period=0 --force
  # Post-condition: valid-pod doesn't exist
  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''

  set +o nounset
  set +o errexit
}
# Verifies `kubectl certificate approve` and `kubectl certificate deny`,
# each addressed both by resource name and by manifest file (-f).
run_certificates_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing certificates"

  # One loop replaces the four previously duplicated approve/deny stanzas;
  # each iteration creates the CSR, applies the verb, asserts the resulting
  # condition type, and deletes the CSR again.
  local mode expected
  for mode in approve deny; do
    if [[ "${mode}" == "approve" ]]; then
      expected='Approved'
    else
      expected='Denied'
    fi

    # address the CSR by resource name
    kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
    kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
    kubectl certificate "${mode}" foo "${kube_flags[@]}"
    kubectl get csr "${kube_flags[@]}" -o json
    kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' "${expected}"
    kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
    kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''

    # address the CSR by manifest file
    kubectl create -f hack/testdata/csr.yml "${kube_flags[@]}"
    kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' ''
    kubectl certificate "${mode}" -f hack/testdata/csr.yml "${kube_flags[@]}"
    kubectl get csr "${kube_flags[@]}" -o json
    kube::test::get_object_assert 'csr/foo' '{{range.status.conditions}}{{.type}}{{end}}' "${expected}"
    kubectl delete -f hack/testdata/csr.yml "${kube_flags[@]}"
    kube::test::get_object_assert csr "{{range.items}}{{$id_field}}{{end}}" ''
  done

  set +o nounset
  set +o errexit
}
# Verifies kubectl plugin discovery (single and multiple KUBECTL_PLUGINS_PATH
# entries), help text, command trees, flag handling, and the env vars
# exported to plugin processes.
run_plugins_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing kubectl plugins"

  # top-level plugin command
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl -h 2>&1)
  kube::test::if_has_string "${output_message}" 'plugin\s\+Runs a command-line plugin'

  # no plugins
  output_message=$(! kubectl plugin 2>&1)
  kube::test::if_has_string "${output_message}" 'no plugins installed'

  # single plugins path
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin 2>&1)
  kube::test::if_has_string "${output_message}" 'echo\s\+Echoes for test-cmd'
  kube::test::if_has_string "${output_message}" 'get\s\+The wonderful new plugin-based get!'
  kube::test::if_has_string "${output_message}" 'error\s\+The tremendous plugin that always fails!'
  kube::test::if_has_not_string "${output_message}" 'The hello plugin'
  kube::test::if_has_not_string "${output_message}" 'Incomplete plugin'
  kube::test::if_has_not_string "${output_message}" 'no plugins installed'

  # multiple plugins path
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin -h 2>&1)
  kube::test::if_has_string "${output_message}" 'echo\s\+Echoes for test-cmd'
  kube::test::if_has_string "${output_message}" 'get\s\+The wonderful new plugin-based get!'
  kube::test::if_has_string "${output_message}" 'error\s\+The tremendous plugin that always fails!'
  kube::test::if_has_string "${output_message}" 'hello\s\+The hello plugin'
  kube::test::if_has_not_string "${output_message}" 'Incomplete plugin'

  # don't override existing commands
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl get -h 2>&1)
  kube::test::if_has_string "${output_message}" 'Display one or many resources'
  # BUGFIX: this previously expanded "$output_message{output_message}" — the
  # output followed by the literal text "{output_message}" — which still
  # worked by accident but was clearly a typo for the plain expansion.
  kube::test::if_has_not_string "${output_message}" 'The wonderful new plugin-based get'

  # plugin help
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin hello -h 2>&1)
  kube::test::if_has_string "${output_message}" 'The hello plugin is a new plugin used by test-cmd to test multiple plugin locations.'
  kube::test::if_has_string "${output_message}" 'Usage:'

  # run plugin
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin hello 2>&1)
  kube::test::if_has_string "${output_message}" '#hello#'
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/:test/fixtures/pkg/kubectl/plugins2/ kubectl plugin echo 2>&1)
  kube::test::if_has_string "${output_message}" 'This plugin works!'
  output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin hello 2>&1)
  kube::test::if_has_string "${output_message}" 'unknown command'
  output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin error 2>&1)
  kube::test::if_has_string "${output_message}" 'error: exit status 1'

  # plugin tree
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree 2>&1)
  kube::test::if_has_string "${output_message}" 'Plugin with a tree of commands'
  kube::test::if_has_string "${output_message}" 'child1\s\+The first child of a tree'
  kube::test::if_has_string "${output_message}" 'child2\s\+The second child of a tree'
  kube::test::if_has_string "${output_message}" 'child3\s\+The third child of a tree'
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 --help 2>&1)
  kube::test::if_has_string "${output_message}" 'The first child of a tree'
  kube::test::if_has_not_string "${output_message}" 'The second child'
  kube::test::if_has_not_string "${output_message}" 'child2'
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 2>&1)
  kube::test::if_has_string "${output_message}" 'child one'
  kube::test::if_has_not_string "${output_message}" 'child1'
  kube::test::if_has_not_string "${output_message}" 'The first child'

  # plugin env
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env -h 2>&1)
  kube::test::if_has_string "${output_message}" "This is a flag 1"
  kube::test::if_has_string "${output_message}" "This is a flag 2"
  kube::test::if_has_string "${output_message}" "This is a flag 3"
  output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env --test1=value1 -t value2 2>&1)
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_CURRENT_NAMESPACE'
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_CALLER'
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_DESCRIPTOR_COMMAND=./env.sh'
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_DESCRIPTOR_SHORT_DESC=The plugin envs plugin'
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG'
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT=0'
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST1=value1'
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST2=value2'
  kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_LOCAL_FLAG_TEST3=default'

  set +o nounset
  set +o errexit
}
# Verifies user/group impersonation: --as-group without --as must fail, and
# CSRs created under --as/--as-group record the impersonated identity.
run_impersonation_tests() {
  set -o nounset
  set -o errexit

  kube::log::status "Testing impersonation"

  # --as-group alone is rejected
  output_message=$(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1)
  kube::test::if_has_string "${output_message}" 'without impersonating a user'

  if kube::test::if_supports_resource "${csr}" ; then
    # --as: CSR spec records the impersonated username and default group
    kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1
    kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1'
    kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated'
    kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"

    # --as-group: repeated flags accumulate; the odd ",,,chameleon" value is
    # passed through verbatim (commas are not split)
    kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon
    kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3'
    kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon '
    kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}"
  fi

  set +o nounset
  set +o errexit
}
# Runs all kubectl tests.
# Requires an env var SUPPORTED_RESOURCES which is a comma separated list of
# resources for which tests should be run.
runTests() {
foundError="False"
if [ -z "${SUPPORTED_RESOURCES:-}" ]; then
echo "Need to set SUPPORTED_RESOURCES env var. It is a list of resources that are supported and hence should be tested. Set it to (*) to test all resources"
exit 1
fi
kube::log::status "Checking kubectl version"
kubectl version
# use timestamp as the name of namespace because increasing the variable inside subshell
# does not affect the value of the variable outside the subshell.
create_and_use_new_namespace() {
namespace_number=$(date +%s%N)
kube::log::status "Creating namespace namespace${namespace_number}"
kubectl create namespace "namespace${namespace_number}"
kubectl config set-context "${CONTEXT}" --namespace="namespace${namespace_number}"
}
kube_flags=(
-s "http://127.0.0.1:${API_PORT}"
)
# token defined in hack/testdata/auth-tokens.csv
kube_flags_with_token=(
-s "https://127.0.0.1:${SECURE_API_PORT}" --token=admin-token --insecure-skip-tls-verify=true
)
if [[ -z "${ALLOW_SKEW:-}" ]]; then
kube_flags+=("--match-server-version")
kube_flags_with_token+=("--match-server-version")
fi
if kube::test::if_supports_resource "${nodes}" ; then
[ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
fi
id_field=".metadata.name"
labels_field=".metadata.labels"
annotations_field=".metadata.annotations"
service_selector_field=".spec.selector"
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
rs_replicas_field=".spec.replicas"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
second_port_name="(index .spec.ports 1).name"
image_field="(index .spec.containers 0).image"
pod_container_name_field="(index .spec.containers 0).name"
container_name_field="(index .spec.template.spec.containers 0).name"
hpa_min_field=".spec.minReplicas"
hpa_max_field=".spec.maxReplicas"
hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
statefulset_replicas_field=".spec.replicas"
statefulset_observed_generation=".status.observedGeneration"
job_parallelism_field=".spec.parallelism"
deployment_replicas=".spec.replicas"
secret_data=".data"
secret_type=".type"
change_cause_annotation='.*kubernetes.io/change-cause.*'
pdb_min_available=".spec.minAvailable"
pdb_max_unavailable=".spec.maxUnavailable"
template_generation_field=".spec.templateGeneration"
container_len="(len .spec.template.spec.containers)"
image_field0="(index .spec.template.spec.containers 0).image"
image_field1="(index .spec.template.spec.containers 1).image"
# Make sure "default" namespace exists.
if kube::test::if_supports_resource "${namespaces}" ; then
output_message=$(kubectl get "${kube_flags[@]}" namespaces)
if [[ ! $(echo "${output_message}" | grep "default") ]]; then
# Create default namespace
kubectl create "${kube_flags[@]}" ns default
fi
fi
# Make sure "kubernetes" service exists.
if kube::test::if_supports_resource "${services}" ; then
# Attempt to create the kubernetes service, tolerating failure (since it might already exist)
kubectl create "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml || true
# Require the service to exist (either we created it or the API server did)
kubectl get "${kube_flags[@]}" -f hack/testdata/kubernetes-service.yaml
fi
#########################
# Kubectl version #
#########################
record_command run_kubectl_version_tests
#######################
# kubectl config set #
#######################
record_command run_kubectl_config_set_tests
#######################
# kubectl local proxy #
#######################
record_command run_kubectl_local_proxy_tests
#########################
# RESTMapper evaluation #
#########################
record_command run_RESTMapper_evaluation_tests
################
# Cluster Role #
################
if kube::test::if_supports_resource "${clusterroles}" ; then
record_command run_clusterroles_tests
fi
########
# Role #
########
if kube::test::if_supports_resource "${roles}" ; then
record_command run_role_tests
fi
#########################
# Assert short name #
#########################
record_command run_assert_short_name_tests
#########################
# Assert categories #
#########################
## test if a category is exported during discovery
if kube::test::if_supports_resource "${pods}" ; then
record_command run_assert_categories_tests
fi
###########################
# POD creation / deletion #
###########################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_pod_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_save_config_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_create_error_tests
fi
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move apply tests to run on rs instead of pods so that they can be
# run for federation apiserver as well.
record_command run_kubectl_apply_tests
record_command run_kubectl_run_tests
record_command run_kubectl_create_filter_tests
fi
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_kubectl_apply_deployments_tests
fi
###############
# Kubectl get #
###############
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move get tests to run on rs instead of pods so that they can be
# run for federation apiserver as well.
record_command run_kubectl_get_tests
fi
##################
# Global timeout #
##################
if kube::test::if_supports_resource "${pods}" ; then
# TODO: Move request timeout tests to run on rs instead of pods so that they
# can be run for federation apiserver as well.
record_command run_kubectl_request_timeout_tests
fi
#####################################
# Third Party Resources #
#####################################
# customresourcedefinitions cleanup after themselves. Run these first, then TPRs
if kube::test::if_supports_resource "${customresourcedefinitions}" ; then
record_command run_crd_tests
fi
#################
# Run cmd w img #
#################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_cmd_with_img_tests
fi
#####################################
# Recursive Resources via directory #
#####################################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_recursive_resources_tests
fi
##############
# Namespaces #
##############
if kube::test::if_supports_resource "${namespaces}" ; then
record_command run_namespace_tests
fi
###########
# Secrets #
###########
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${secrets}" ; then
record_command run_secrets_test
fi
fi
######################
# ConfigMap #
######################
if kube::test::if_supports_resource "${namespaces}"; then
if kube::test::if_supports_resource "${configmaps}" ; then
record_command run_configmap_tests
fi
fi
####################
# Client Config #
####################
record_command run_client_config_tests
####################
# Service Accounts #
####################
if kube::test::if_supports_resource "${namespaces}" && kube::test::if_supports_resource "${serviceaccounts}" ; then
record_command run_service_accounts_tests
fi
#################
# Pod templates #
#################
if kube::test::if_supports_resource "${podtemplates}" ; then
record_command run_pod_templates_tests
fi
############
# Services #
############
if kube::test::if_supports_resource "${services}" ; then
record_command run_service_tests
fi
##################
# DaemonSets #
##################
if kube::test::if_supports_resource "${daemonsets}" ; then
record_command run_daemonset_tests
if kube::test::if_supports_resource "${controllerrevisions}"; then
record_command run_daemonset_history_tests
fi
fi
###########################
# Replication controllers #
###########################
if kube::test::if_supports_resource "${namespaces}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_rc_tests
fi
fi
######################
# Deployments #
######################
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_deployment_tests
fi
######################
# Replica Sets #
######################
if kube::test::if_supports_resource "${replicasets}" ; then
record_command run_rs_tests
fi
#################
# Stateful Sets #
#################
if kube::test::if_supports_resource "${statefulsets}" ; then
record_command run_stateful_set_tests
if kube::test::if_supports_resource "${controllerrevisions}"; then
record_command run_statefulset_history_tests
fi
fi
######################
# Lists #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${deployments}" ; then
record_command run_lists_tests
fi
fi
######################
# Multiple Resources #
######################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_multi_resources_tests
fi
fi
######################
# Persistent Volumes #
######################
if kube::test::if_supports_resource "${persistentvolumes}" ; then
record_command run_persistent_volumes_tests
fi
############################
# Persistent Volume Claims #
############################
if kube::test::if_supports_resource "${persistentvolumeclaims}" ; then
record_command run_persistent_volume_claims_tests
fi
############################
# Storage Classes #
############################
if kube::test::if_supports_resource "${storageclass}" ; then
record_command run_storage_class_tests
fi
#########
# Nodes #
#########
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_nodes_tests
fi
########################
# authorization.k8s.io #
########################
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
record_command run_authorization_tests
fi
# kubectl auth can-i
# kube-apiserver is started with authorization mode AlwaysAllow, so kubectl can-i always returns yes
if kube::test::if_supports_resource "${subjectaccessreviews}" ; then
output_message=$(kubectl auth can-i '*' '*' 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(kubectl auth can-i get pods --subresource=log 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(kubectl auth can-i get invalid_resource 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "the server doesn't have a resource type"
output_message=$(kubectl auth can-i get /logs/ 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "yes"
output_message=$(! kubectl auth can-i get /logs/ --subresource=log 2>&1 "${kube_flags[@]}")
kube::test::if_has_string "${output_message}" "subresource can not be used with nonResourceURL"
output_message=$(kubectl auth can-i list jobs.batch/bar -n foo --quiet 2>&1 "${kube_flags[@]}")
kube::test::if_empty_string "${output_message}"
fi
# kubectl auth reconcile
if kube::test::if_supports_resource "${clusterroles}" ; then
kubectl auth reconcile "${kube_flags[@]}" -f test/fixtures/pkg/kubectl/cmd/auth/rbac-resource-plus.yaml
kube::test::get_object_assert 'rolebindings -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-RB:'
kube::test::get_object_assert 'roles -n some-other-random -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-R:'
kube::test::get_object_assert 'clusterrolebindings -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CRB:'
kube::test::get_object_assert 'clusterroles -l test-cmd=auth' "{{range.items}}{{$id_field}}:{{end}}" 'testing-CR:'
kubectl delete "${kube_flags[@]}" rolebindings,role,clusterroles,clusterrolebindings -n some-other-random -l test-cmd=auth
fi
#####################
# Retrieve multiple #
#####################
if kube::test::if_supports_resource "${nodes}" ; then
if kube::test::if_supports_resource "${services}" ; then
record_command run_retrieve_multiple_tests
fi
fi
#####################
# Resource aliasing #
#####################
if kube::test::if_supports_resource "${services}" ; then
if kube::test::if_supports_resource "${replicationcontrollers}" ; then
record_command run_resource_aliasing_tests
fi
fi
###########
# Explain #
###########
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_explain_tests
fi
###########
# Swagger #
###########
record_command run_swagger_tests
#####################
# Kubectl --sort-by #
#####################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_sort_by_tests
fi
############################
# Kubectl --all-namespaces #
############################
if kube::test::if_supports_resource "${pods}" ; then
record_command run_kubectl_all_namespace_tests
fi
################
# Certificates #
################
if kube::test::if_supports_resource "${csr}" ; then
record_command run_certificates_tests
fi
###########
# Plugins #
###########
record_command run_plugins_tests
#################
# Impersonation #
#################
record_command run_impersonation_tests
kube::test::clear_all
if [ "$foundError" == "True" ]; then
echo "TEST FAILED"
exit 1
fi
}
# Exercises the kubectl --include-uninitialized flag across verbs (get,
# describe, label, annotate, edit, set, delete, apply) against resources
# created from the initializer fixtures under hack/testdata/.
# Relies on harness globals: kube_flags, labels_field, annotations_field,
# id_field, and the kube::test::* assertion helpers.
# NOTE(review): the creates below use --request-timeout=1 ... || true --
# presumably the create blocks on the pending initializer and is expected
# to time out, leaving the object uninitialized; confirm against the
# initializer fixture definitions.
run_initializer_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing --include-uninitialized"
### Create a deployment
kubectl create --request-timeout=1 -f hack/testdata/initializer-deployments.yaml 2>&1 "${kube_flags[@]}" || true
### Test kubectl get --include-uninitialized
# Command
output_message=$(kubectl get deployments 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl get deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: I assume "web" is the deployment name
kube::test::if_has_string "${output_message}" 'web'
# Command
output_message=$(kubectl get deployments web 2>&1 "${kube_flags[@]}")
# Post-condition: I assume "web" is the deployment name
kube::test::if_has_string "${output_message}" 'web'
# Command
output_message=$(kubectl get deployments --show-all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
### Test kubectl describe --include-uninitialized
# Command
output_message=$(kubectl describe deployments 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl describe deployments web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
# Command
output_message=$(kubectl describe deployments web --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "run=web" should be part of the output
kube::test::if_has_string "${output_message}" 'run=web'
### Test kubectl label --include-uninitialized
# Command
output_message=$(kubectl label deployments labelkey1=labelvalue1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey1}}" 'labelvalue1'
# Command
output_message=$(kubectl label deployments labelkey2=labelvalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey3=labelvalue3 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey4=labelvalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey4}}" 'labelvalue4'
# Command
output_message=$(kubectl label deployments labelkey5=labelvalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl label deployments labelkey6=labelvalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey6}}" 'labelvalue6'
# Command
output_message=$(kubectl label deployments web labelkey7=labelvalue7 2>&1 "${kube_flags[@]}")
# Post-condition: web is labelled
kube::test::if_has_string "${output_message}" 'deployment "web" labeled'
kube::test::get_object_assert 'deployments web' "{{${labels_field}.labelkey7}}" 'labelvalue7'
# Found All Labels
kube::test::get_object_assert 'deployments web' "{{${labels_field}}}" 'map[labelkey1:labelvalue1 labelkey4:labelvalue4 labelkey6:labelvalue6 labelkey7:labelvalue7 run:web]'
### Test kubectl annotate --include-uninitialized
# Command
output_message=$(kubectl annotate deployments annotatekey1=annotatevalue1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey1}}" 'annotatevalue1'
# Command
output_message=$(kubectl annotate deployments annotatekey2=annotatevalue2 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey3=annotatevalue3 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey4=annotatevalue4 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey4}}" 'annotatevalue4'
# Command
output_message=$(kubectl annotate deployments annotatekey5=annotatevalue5 -l run=web --all 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl annotate deployments annotatekey6=annotatevalue6 -l run=web --all --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey6}}" 'annotatevalue6'
# Command
output_message=$(kubectl annotate deployments web annotatekey7=annotatevalue7 2>&1 "${kube_flags[@]}")
# Post-condition: web DEPLOYMENT has annotation
kube::test::if_has_string "${output_message}" 'deployment "web" annotated'
kube::test::get_object_assert 'deployments web' "{{${annotations_field}.annotatekey7}}" 'annotatevalue7'
### Test kubectl edit --include-uninitialized
# NOTE(review): `[ "$(...)" ]` asserts the grep found a non-empty match.
[ "$(EDITOR=cat kubectl edit deployments 2>&1 "${kube_flags[@]}" | grep 'edit cancelled, no objects found')" ]
[ "$(EDITOR=cat kubectl edit deployments --include-uninitialized 2>&1 "${kube_flags[@]}" | grep 'Edit cancelled, no changes made.')" ]
### Test kubectl set image --include-uninitialized
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set image deployments *=nginx:1.11 -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set image deployments *=nginx:1.12 -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
# Command
output_message=$(kubectl set image deployments *=nginx:1.13 -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "image updated" should be part of the output
kube::test::if_has_string "${output_message}" 'image updated'
### Test kubectl set resources --include-uninitialized
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi -l run=web 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=200m,memory=256Mi -l run=web --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
# Command
output_message=$(kubectl set resources deployments --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=512Mi -l run=web --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "resource requirements updated" should be part of the output
kube::test::if_has_string "${output_message}" 'resource requirements updated'
### Test kubectl set selector --include-uninitialized
# Create a service with initializer
kubectl create --request-timeout=1 -f hack/testdata/initializer-redis-master-service.yaml 2>&1 "${kube_flags[@]}" || true
# Command
output_message=$(kubectl set selector services role=padawan --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "selector updated" should be part of the output
kube::test::if_has_string "${output_message}" 'selector updated'
# Command
output_message=$(kubectl set selector services role=padawan --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
### Test kubectl set subject --include-uninitialized
# Create a create clusterrolebinding with initializer
kubectl create --request-timeout=1 -f hack/testdata/initializer-clusterrolebinding.yaml 2>&1 "${kube_flags[@]}" || true
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
# Command
output_message=$(kubectl set subject clusterrolebinding --user=foo -l clusterrolebinding=super --include-uninitialized --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "subjects updated" should be part of the output
kube::test::if_has_string "${output_message}" 'subjects updated'
### Test kubectl set serviceaccount --include-uninitialized
# Command
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "serviceaccount updated" should be part of the output
kube::test::if_has_string "${output_message}" 'serviceaccount updated'
# Command
output_message=$(kubectl set serviceaccount deployment serviceaccount1 --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The output should be empty
kube::test::if_empty_string "${output_message}"
### Test kubectl delete --include-uninitialized
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
# Command
output_message=$(kubectl delete clusterrolebinding --all --include-uninitialized=false 2>&1 "${kube_flags[@]}")
# Post-condition: The text "No resources found" should be part of the output
kube::test::if_has_string "${output_message}" 'No resources found'
# Command
output_message=$(kubectl delete clusterrolebinding --all 2>&1 "${kube_flags[@]}")
# Post-condition: The text "deleted" should be part of the output
kube::test::if_has_string "${output_message}" 'deleted'
kube::test::get_object_assert clusterrolebinding/super-admin "{{range.items}}{{$id_field}}:{{end}}" ''
### Test kubectl apply --include-uninitialized
# Pre-Condition: no POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
# apply pod a
kubectl apply --prune --request-timeout=20 --include-uninitialized=false --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
# apply pod a and prune uninitialized deployments web
kubectl apply --prune --request-timeout=20 --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
# apply pod a and prune uninitialized deployments web
kubectl apply --prune --request-timeout=20 --include-uninitialized --all -f hack/testdata/prune/a.yaml "${kube_flags[@]}" 2>&1
# check right pod exists
kube::test::get_object_assert pods/a "{{${id_field}}}" 'a'
# Post-condition: Other uninitialized resources should not be pruned
kube::test::get_object_assert deployments/web "{{range.items}}{{$id_field}}:{{end}}" 'web'
kube::test::get_object_assert services/redis-master "{{range.items}}{{$id_field}}:{{end}}" 'redis-master'
# cleanup
kubectl delete pod a
kubectl delete --request-timeout=1 deploy web
kubectl delete --request-timeout=1 service redis-master
# Restore the harness's default (lenient) shell options.
set +o nounset
set +o errexit
}
|
pwittrock/reference-docs
|
vendor/k8s.io/kubernetes/hack/make-rules/test-cmd-util.sh
|
Shell
|
apache-2.0
| 252,672 |
#!/usr/bin/env sh
# Build a LevelDB of sliding-window test data by invoking the
# convert_normal_test.bin tool from the caffe-3dnormal checkout.
# NOTE(review): the trailing numeric args (0 0 55 55) are passed through
# to the converter -- presumably offsets and window dimensions; confirm
# against the tool's usage.

# Root of the caffe-3dnormal build tree providing the conversion binary.
rootfolder=/nfs/hn46/xiaolonw/cnncode/caffe-3dnormal_local

# Quote the expansion so the invocation is robust to spaces in the path.
GLOG_logtostderr=1 "$rootfolder/build/examples/3dnormal/convert_normal_test.bin" /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window/images_test/ /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window/testLabels_ran.txt /nfs/ladoga_no_backups/users/xiaolonw/seg_cls/sliding_window/leveldb/3d_test_db_small_ran 0 0 55 55
|
xiaolonw/caffe-3dnormal_joint_past
|
scripts/3dnormal_win_cls_denoise_fc2/convert_3dnormal_test_ran.sh
|
Shell
|
bsd-2-clause
| 413 |
########################################################################
# Tests for xtrabackup --print-param
########################################################################

# Build a my.cnf containing the InnoDB options that --print-param should
# echo back.  Several values use unit suffixes (5M, 4K) that the tool is
# expected to normalize to plain byte counts in its output.
my_cnf="[mysqld]
datadir=/some/data/dir
tmpdir=/some/tmp/dir1:/some/tmp/dir2
innodb_data_home_dir=/some/innodb/dir
innodb_data_file_path=ibdata1:10M;ibdata2:5M:autoextend
innodb_log_group_home_dir=/some/log/dir
innodb_log_files_in_group=3
innodb_log_file_size=5M
innodb_flush_method=O_DIRECT
innodb_page_size=4K
innodb_fast_checksum=1
innodb_log_block_size=4K
innodb_doublewrite_file=/some/doublewrite/file
innodb_undo_directory=/some/undo/directory
innodb_undo_tablespaces=8
innodb_checksum_algorithm=strict_crc32
innodb_log_checksum_algorithm=none
innodb_buffer_pool_filename=/some/buffer/pool/file"

# Quote $topdir so the redirection target survives paths with spaces.
echo "$my_cnf" >"$topdir/my.cnf"

# Compare the tool's output against the expected config verbatim; sizes
# must come back expanded (5M -> 5242880, 4K -> 4096).
# NOTE(review): $XB_BIN is left unquoted in case the harness sets it to a
# command with arguments (e.g. a valgrind wrapper) -- confirm.
diff -u <($XB_BIN --defaults-file="$topdir/my.cnf" --print-param) - <<EOF
# This MySQL options file was generated by XtraBackup.
[mysqld]
datadir=/some/data/dir
tmpdir=/some/tmp/dir1:/some/tmp/dir2
innodb_data_home_dir=/some/innodb/dir
innodb_data_file_path=ibdata1:10M;ibdata2:5M:autoextend
innodb_log_group_home_dir=/some/log/dir
innodb_log_files_in_group=3
innodb_log_file_size=5242880
innodb_flush_method=O_DIRECT
innodb_page_size=4096
innodb_fast_checksum=1
innodb_log_block_size=4096
innodb_doublewrite_file=/some/doublewrite/file
innodb_undo_directory=/some/undo/directory
innodb_undo_tablespaces=8
innodb_checksum_algorithm=strict_crc32
innodb_log_checksum_algorithm=none
innodb_buffer_pool_filename=/some/buffer/pool/file
EOF
|
tplavcic/percona-xtrabackup
|
storage/innobase/xtrabackup/test/t/xb_print_param.sh
|
Shell
|
gpl-2.0
| 1,552 |
#!/bin/bash
# Upload the lcov coverage report to Coveralls.
# Redirect the report into coveralls directly instead of `cat file | cmd`.
./node_modules/.bin/coveralls < coverage/lcov.info
echo '' # reset exit code -- failure to post coverage shouldn't be an error.
|
klausw/dygraphs
|
scripts/post-coverage.sh
|
Shell
|
mit
| 146 |
#!/bin/bash
echo "This script builds docs on Peter's Orocos tree
1a. expects a directory build/orocos-rtt-VERSION and build/orocos-rtt-VERSION/build
1b. OR if version is 'latest' , do fresh check out from trunk
3. make docs in build
4. tar the result
5. copy over all the doc-tar files
6. extract and move files on server
"
set -ex
if [ x$1 == x ] ; then
echo "Please provide version-string parameter"
exit 1
fi;
if [ x$2 != xmaster ] ; then
DEV=no
else
DEV=yes
fi;
DOLOCAL=yes
. release-config
if test $DOLOCAL = yes; then
USER=$USER
SERVER=localhost
SPREFIX=src/export/upload
else
if test x$DOOROCOSORG = xyes -a x$DEV = xno; then
USER=bruyninckxh2
SERVER=www.orocos.org
SPREFIX=www.orocos.org
else
USER=psoetens
SERVER=ftp.mech.kuleuven.be
SPREFIX=/www/orocos/pub
APREFIX=/www/orocos
fi
fi
if test x$DEV = xyes; then
BRANCH=devel
VERSION=master
VVERSION=master
else
BRANCH=stable
VERSION=$1
VVERSION=v$1
fi
# i.e. 1.2.1 -> 1.2
BRANCHVERSION=$(echo $VERSION | sed -e 's/\(.*\)\.\(.*\)\..*/\1.\2/g')
topdir=$(pwd)
if test x$DOAUTO != xyes; then
echo "VERSION is set to $VERSION (use 'master' to install trunk on server)"
echo "DEV is set to $DEV (use 'master' as arg2 to install in 'devel' on server)"
echo "Press c to continue, any other key to upload files to server and Ctrl-C to abort..."
read -s -n1 x
else
x=c
fi
if [ x$x == xc ] ; then
#if master, check out trunk
mkdir -p build; cd build
# For 'master' (or when DOCHECKOUT=yes) export a pristine tree from git HEAD
# of the local orocos-toolchain-rtt clone into build/.
if test x$VERSION = xmaster -o x$DOCHECKOUT = xyes; then
rm -rf orocos-toolchain-$VERSION/rtt
cd $topdir/orocos-toolchain-rtt
git archive --format=tar --prefix=orocos-toolchain-$VERSION/rtt/ HEAD | (cd $topdir/build && tar xf -)
cd $topdir/build
cd orocos-toolchain-$VERSION/rtt
mkdir build
cd build
cmake ..
fi
cd $topdir/build
#all should be equal for MASTER and normal :
if ! test -d orocos-toolchain-$VERSION/rtt ; then
echo "Could not find orocos-toolchain-$VERSION/rtt !"
exit 1
fi
cd orocos-toolchain-$VERSION/rtt
# Doxygen
# Generate the API reference and pack it as orocos-rtt-$VERSION-api.tar.bz2.
mkdir -p build
cd build
cmake ..
make docapi
cd doc
tar -cjf orocos-rtt-$VERSION-api.tar.bz2 api
mv orocos-rtt-$VERSION-api.tar.bz2 api ..
#make install-docs # install rtt.tag file !
cd ..
cd ..
# Build base package
# Generate the user documentation (HTML + PDF from the docbook xml) and
# pack only the distributable files as orocos-rtt-$VERSION-doc.tar.bz2.
cd build
cd doc
make dochtml -j12
make docpdf -j12
cp -a xml doc-xml
rm -rf doc-xml/images/hires # not for distribution
tar -cjf orocos-rtt-$VERSION-doc.tar.bz2 $(find doc-xml -name "*.png" -o -name "*.pdf" -o -name "*.html" -o -name "*css") ||exit 1
rm -rf doc-xml
mv orocos-rtt-$VERSION-doc.tar.bz2 ..
cd ..
cd ..
if test x$DOAUTO != xyes; then
echo "Press a key to copy Docs to server, Ctrl-C to abort..."
read -s -n1
fi
else
# Build was skipped: just enter the existing build tree for the upload step.
cd $topdir/build/orocos-toolchain-$VERSION/rtt
fi; # press d
# Upload loop: runs once against the primary server and, unless we exit
# early below, a second time against the KU Leuven mirror (the variables
# are re-pointed at the bottom before the loop repeats).
while [ 1 ]; do
echo -e "\n**** COPYING TO $SERVER: ****\n"
# Docs :
# Save in version subdir as tar, save master in doc dir. (saves space).
cd build
# Copy over tar.bz2 files
# APREFIX is for triggering the automounter
ssh $USER@$SERVER "ls $APREFIX; mkdir -p $SPREFIX/$BRANCH/rtt/$VVERSION"
scp orocos-rtt-$VERSION-doc.tar.bz2 $USER@$SERVER:$SPREFIX/$BRANCH/rtt/$VVERSION
scp orocos-rtt-$VERSION-api.tar.bz2 $USER@$SERVER:$SPREFIX/$BRANCH/rtt/$VVERSION
# Install them in the 'documentation' dir:
# 'doc' is not physically existing, it is the drupal path to imported docs
# 'doc-xml' are the xml generated html/pdf files
# 'api' is the doxygen generated sources
if test x$DEV = xno; then
# Stable release: unpack under vMAJOR.MINOR.x and, when this release is
# newer than what the v2.x symlink points at, repoint the symlink.
ssh $USER@$SERVER "mkdir -p $SPREFIX/$BRANCH/documentation/rtt/v$BRANCHVERSION.x/"
ssh $USER@$SERVER "cd $SPREFIX/$BRANCH/documentation/rtt/v$BRANCHVERSION.x/ &&
rm -rf doc api doc-xml &&
tar -xjf ../../../rtt/$VVERSION/orocos-rtt-$VERSION-doc.tar.bz2 &&
tar -xjf ../../../rtt/$VVERSION/orocos-rtt-$VERSION-api.tar.bz2 &&
rm -f ../../../rtt/$VVERSION/orocos-rtt-$VERSION-api.tar.bz2 ../../../rtt/$VVERSION/orocos-rtt-$VERSION-doc.tar.bz2 &&
cd .. && { linkv=\$(ls -l v2.x | sed -e\"s/l.*-> v//;s/\.//g;s/x//\"); branchv=\$(echo $BRANCHVERSION | sed -e\"s/\.//g\"); if test 0\$branchv -gt 0\$linkv; then
rm -f v2.x && ln -s v$BRANCHVERSION.x v2.x ; echo Updated link for new version.
fi;
}
"
else
# Development build: unpack directly under documentation/rtt/master.
ssh $USER@$SERVER "mkdir -p $SPREFIX/$BRANCH/documentation/rtt/$VVERSION"
ssh $USER@$SERVER "cd $SPREFIX/$BRANCH/documentation/rtt/$VVERSION &&
rm -rf doc api doc-xml &&
tar -xjf ../../../rtt/$VVERSION/orocos-rtt-$VERSION-doc.tar.bz2 &&
tar -xjf ../../../rtt/$VVERSION/orocos-rtt-$VERSION-api.tar.bz2 &&
rm -f ../../../rtt/$VVERSION/orocos-rtt-$VERSION-api.tar.bz2 ../../../rtt/$VVERSION/orocos-rtt-$VERSION-doc.tar.bz2
"
fi
cd ..
# copy latest news to packages directory :
scp NEWS $USER@$SERVER:$SPREFIX/$BRANCH/rtt/NEWS.txt
scp README $USER@$SERVER:$SPREFIX/$BRANCH/rtt/README.txt
# Stop after the first pass when no mirroring is wanted.
# (Fixed typo in the message: 'succesfully' -> 'successfully'.)
if test x$DOOROCOSORG = xno -o x$DOLOCAL = xyes -o x$DEV = xyes; then
echo "Completed successfully."
exit 0;
fi
# redo for making a copy on the mech server as well:
USER=psoetens
SERVER=ftp.mech.kuleuven.be
SPREFIX=/www/orocos/pub
APREFIX=/www/orocos
DOOROCOSORG=no
done; # while [ 1 ]
|
smits/rtt
|
tools/scripts/release-docs.sh
|
Shell
|
gpl-2.0
| 5,075 |
#!/bin/bash
# TechEmpower FrameworkBenchmarks installer: ffead-cpp behind nginx with the
# PostgreSQL SDORM backend.
# NOTE(review): the top-level 'return' below only works when this script is
# sourced -- presumably the benchmark toolset sources it; confirm there.
fw_installed ffead-cpp-nginx-postgresql && return 0
# Prerequisites; fw_depends/fw_installed are provided by the toolset.
fw_depends postgresql
fw_depends ffead-cpp-unixodbc
fw_depends ffead-cpp-mongocdriver
fw_depends ffead-cpp-nginx
cd ${IROOT}/ffead-cpp-src/
# Switch the benchmark app to its SQL entity sources and the PostgreSQL
# SDORM configuration before rebuilding.
cp -f web/te-benchmark/sql-src/TeBkWorldsql.h web/te-benchmark/include/TeBkWorld.h
cp -f web/te-benchmark/sql-src/TeBkWorldsql.cpp web/te-benchmark/src/TeBkWorld.cpp
cp -f web/te-benchmark/config/sdormpostgresql.xml web/te-benchmark/config/sdorm.xml
rm -rf ffead-cpp-2.0-bin
make build-apps
# Replace any previous install with the freshly built tree.
rm -rf ${IROOT}/ffead-cpp-2.0
cp -rf ffead-cpp-2.0-bin ${IROOT}/ffead-cpp-2.0
cd ${IROOT}/ffead-cpp-2.0
# Drop sample apps that are not part of the benchmark.
rm -rf web/default web/oauthApp web/flexApp web/markers
chmod 755 *.sh resources/*.sh rtdcf/autotools/*.sh
# Run the server once and wait until port 8080 accepts connections, then
# remove its control file; this lets it generate runtime artifacts.
./server.sh > ffead-cpp-nginx-postgresql.log 2>&1
while ! echo exit | nc localhost 8080; do sleep 5; done
rm -f serv.ctrl
sleep 10
cd ${IROOT}
# Marker file read back by the toolset; it also puts the bundled nginx on PATH.
echo -e "export PATH=${IROOT}/nginxfc/sbin:\$PATH" > $IROOT/ffead-cpp-nginx-postgresql.installed
|
saturday06/FrameworkBenchmarks
|
toolset/setup/linux/frameworks/ffead-cpp-nginx-postgresql.sh
|
Shell
|
bsd-3-clause
| 951 |
#! /bin/bash
# BuildTarget: images/plus.png
set -e
# Copy the stock 'plus' icon into the docs images directory.
# Quoted: an unquoted $GAFFER_ROOT would word-split/glob on paths
# containing spaces or wildcard characters.
cp "$GAFFER_ROOT/graphics/plus.png" images
|
andrewkaufman/gaffer
|
doc/source/WorkingWithTheNodeGraph/TutorialUsingTheOSLCodeNode/generate.sh
|
Shell
|
bsd-3-clause
| 94 |
# Start a throwaway Elasticsearch 5.2.1 container for the test suite:
# REST API on 9200, transport on 9300, config mounted from ./etc, 1 GB heap.
# --rm removes the container again when it exits.
docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/etc:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.2.1 elasticsearch
|
devops-israel/delete-aws-es-incidents
|
vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.1.sh
|
Shell
|
mit
| 172 |
#!/bin/bash
#
# Generate a Go test program from a Ragel test case (.rl file) and write it
# to stdout.  Usage: langtrans_go.sh <testcase.rl>
file=$1
[ -f $file ] || exit 1
root=${file%.rl}
# Get the machine name.
machine=`sed -n 's/^[\t ]*machine[\t ]*\([a-zA-Z_0-9]*\)[\t ]*;[\t ]*$/\1/p' $file`
# Make a temporary version of the test case using the Go language translations.
# (comment fixed: this pipeline runs langtrans_go.txl, not the Java one)
sed -n '/\/\*/,/\*\//d;p' $file | txl -q stdin langtrans_go.txl > $file.pr
# An explicit @NEEDS_EOF marker, or the checkeofact.txl analysis, decides
# whether the generated exec() must define 'eof'.
needs_eof=`sed '/@NEEDS_EOF/s/^.*$/yes/p;d' $file`
if [ "$needs_eof" != 'yes' ]; then
	needs_eof=`sed -n '/\/\*/,/\*\//d;p' $file | txl -q stdin checkeofact.txl`
fi
# Begin writing out the test case.
cat << EOF
/*
 * @LANG: go
 * @GENERATED: yes
EOF
grep '@ALLOW_GENFLAGS:' $file
grep '@ALLOW_MINFLAGS:' $file
cat << EOF
*/
package main
import "fmt"
EOF
# Write the data declarations
sed -n '/^%%$/q;{s/^/\t/;p}' $file.pr
# Write out the machine specification.
sed -n '/^%%{$/,/^}%%/{s/^/\t/;p}' $file.pr
# Write out the init and execute routines.
cat << EOF
var cs int
%% write data;
func prepare() {
EOF
sed -n '0,/^%%$/d; /^%%{$/q; {s/^/\t\t/;p}' $file.pr
cat << EOF
%% write init;
}
func exec(data string) {
	var p int = 0
	var pe int = len(data)
EOF
[ "$needs_eof" = "yes" ] && echo "	var eof int = pe"
cat << EOF
%% write exec;
}
func finish() {
	if cs >= ${machine}_first_final {
		fmt.Println("ACCEPT")
	} else {
		fmt.Println("FAIL")
	}
}
EOF
# Write out the test data.
# Input lines between the _____INPUT_____ markers become a Go string slice.
sed -n '0,/\/\* _____INPUT_____/d; /_____INPUT_____ \*\//q; p;' $file | awk '
BEGIN {
	printf "var inp []string = []string{"
}
{
	printf "%s, ", $0
}
END {
	print "}"
	print ""
}'
# Write out the main routine.
cat << EOF
func main() {
	for _, data := range inp {
		prepare()
		exec(data)
		finish()
	}
}
EOF
# Write out the expected output.
sed -n '/\/\* _____OUTPUT_____/,/_____OUTPUT_____ \*\//p;' $file
# Don't need this language-specific file anymore.
rm $file.pr
|
kaostao/ragel
|
test/langtrans_go.sh
|
Shell
|
gpl-2.0
| 1,867 |
#!/usr/bin/env bash
# Interactively collect and validate the BitPay test credentials.
# Exports BITPAY_EMAIL and sets the globals EMAIL, PASSWORD and URL for
# the caller to export.  Each prompt loops until the value is accepted.
set_variables ()
{
    # Email: loop until the input matches a pragmatic RFC-style regex.
    while true
    do
        read -p "Input Email: " EMAIL
        regex="^[a-z0-9!#\$%&'*+/=?^_\`{|}~-]+(\.[a-z0-9!#$%&'*+/=?^_\`{|}~-]+)*@([a-z0-9]([a-z0-9-]*[a-z0-9])?\.)+[a-z0-9]([a-z0-9-]*[a-z0-9])?\$";
        if [[ "$EMAIL" =~ $regex ]]
        then
            export BITPAY_EMAIL=$EMAIL
            break
        else
            echo "Please input a valid email"
        fi
    done
    # Password: ask twice and require both entries to match.
    while true
    do
        read -p "Input Password: " PASSWORD
        read -p "Password Confirmation: " PASSWORD2
        if [ "$PASSWORD" = "$PASSWORD2" ]
        then
            break
        else
            echo "Please input a valid password"
        fi
    done
    # URL: any non-empty value is accepted.
    while true
    do
        # Quoted: the original unquoted '[ -z $URL ]' raised a test error
        # for inputs containing whitespace.
        if read -p "Input URL: " URL; [ -z "$URL" ]
        then
            echo "Please input a valid URL"
        else
            break
        fi
    done
}
# Entry point.  Credentials come either from the environment
# (BITPAY_EMAIL / BITPAY_PASSWORD) or from the command line as
# <url> <email> <password>.
if [ -z "$1" ]
then
    echo "No parameters passed so using Environment Variables"
    # Fixed: the original '[ -z "$BITPAY_PASSWORD"]' was missing the space
    # before ']' and was a test-expression syntax error.
    if [ -z "$BITPAY_EMAIL" ] || [ -z "$BITPAY_PASSWORD" ]
    then
        echo "ERROR: No Email or password are set."
        echo "set BITPAY_EMAIL and BITPAY_PASSWORD as environment variables"
        echo "or pass them as arguments when running this script"
        while true; do
            read -p "Do you wish to set your environment variables here? " yn
            case $yn in
                [Yy]* ) set_variables; break;;
                [Nn]* ) echo "Closing script"; exit;;
                * ) echo "Please answer yes or no.";;
            esac
        done
    else
        echo "Environment Variables already exist for BITPAY."
    fi
else
    # Fixed message: the positional args are URL, email, password (the
    # original claimed $1/$2 were username and password).
    echo "URL $1 and email $2 passed from command line"
    URL=$1
    EMAIL=$2
    PASSWORD=$3
    echo "Setting user and Password to new environment variables..."
fi
# Only overwrite the environment when values were actually collected:
# the original unconditional exports clobbered pre-set BITPAY_EMAIL /
# BITPAY_PASSWORD with empty strings on the environment-only path.
[ -n "$EMAIL" ] && export BITPAY_EMAIL=$EMAIL
[ -n "$PASSWORD" ] && export BITPAY_PASSWORD=$PASSWORD
[ -n "$URL" ] && export BITPAY_URL=$URL
echo "Using Email: $EMAIL"
echo "Using URL: $URL"
echo "Removing old keys..."
# Clear key material left over from a previous run.
if [ -e /tmp/bitpay.pub ]
then
    rm -rf /tmp/bitpay.pub
    rm -rf /tmp/bitpay.pri
    rm -rf /tmp/token.json
fi
echo "Checking if Selenium exists..."
if [ ! -f selenium-server-standalone-2.44.0.jar ]
then
    echo "Downloading Selenium"
    curl -O http://selenium-release.storage.googleapis.com/2.44/selenium-server-standalone-2.44.0.jar
fi
echo "Running Selenium and the tests"
php bin/behat tests/integrations
|
oliverds/openclassifieds2
|
oc/vendor/bitpay/vendor/bitpay/php-client/integration_tests.sh
|
Shell
|
gpl-3.0
| 2,086 |
# Run every Mocha spec file under src/main/webapp/js/src.
# -print0 / xargs -0 keep file names with spaces or newlines intact;
# the plain 'find | xargs' form word-splits them.
find ./src/main/webapp/js/src -name "*.spec.js" -print0 | xargs -0 node_modules/mocha/bin/mocha
|
zhx828/cbioportal
|
portal/runSpecJS.sh
|
Shell
|
agpl-3.0
| 85 |
#!/bin/bash
# Build script for the UCSC 'bedToPsl' utility from the Kent source tree.
# $PREFIX is the install prefix supplied by the packaging environment
# (conda-build style); the kent/ sources are expected in the work dir.
mkdir -p "$PREFIX/bin"
# The Kent makefiles key their architecture and output dir off MACHTYPE
# and BINDIR; L carries extra linker flags into the build.
export MACHTYPE=x86_64
export BINDIR=$(pwd)/bin
export L="${LDFLAGS}"
mkdir -p "$BINDIR"
# Build the required libraries first, then the bedToPsl tool itself.
(cd kent/src/lib && make)
(cd kent/src/htslib && make)
(cd kent/src/jkOwnLib && make)
(cd kent/src/hg/lib && make)
(cd kent/src/hg/utils/bedToPsl && make)
cp bin/bedToPsl "$PREFIX/bin"
chmod +x "$PREFIX/bin/bedToPsl"
|
ostrokach/bioconda-recipes
|
recipes/ucsc-bedtopsl/build.sh
|
Shell
|
mit
| 341 |
#! /bin/sh
# sh tty.sh tty.c
# This inserts all the needed #ifdefs for IF{} statements
# and generates tty.c
#
# Stupid cpp on A/UX barfs on ``#if defined(FOO) && FOO < 17'' when
# FOO is undefined. Reported by Robert C. Tindall (r.tindall@ieee.org)
#
rm -f $1 # remove previous (write-protected, see chmod below) output
sed -e '1,26d' \
	-e 's%^IF{\([^}]*\)}\(.*\)%#if defined(\1)\
\2\
#endif /* \1 */%' \
	-e 's%^IFN{\([^}]*\)}\(.*\)%#if !defined(\1)\
\2\
#endif /* \1 */%' \
	-e 's%^XIF{\([^}]*\)}\(.*\)%#if defined(\1)\
#if (\1 < MAXCC)\
\2\
#endif \
#endif /* \1 */%' \
	< $0 > $1 # input is this very script; '1,26d' strips this shell header, so its line count must not change
chmod -w $1 # write-protect: tty.c is generated, edits belong in tty.sh
exit 0
/* Copyright (c) 2008, 2009
* Juergen Weigert ([email protected])
* Michael Schroeder ([email protected])
* Micah Cowan ([email protected])
* Sadrul Habib Chowdhury ([email protected])
* Copyright (c) 1993-2002, 2003, 2005, 2006, 2007
* Juergen Weigert ([email protected])
* Michael Schroeder ([email protected])
* Copyright (c) 1987 Oliver Laumann
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (see the file COPYING); if not, see
* http://www.gnu.org/licenses/, or contact Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02111-1301 USA
*
****************************************************************
*/
/*
* NOTICE: tty.c is automatically generated from tty.sh
* Do not change anything here. If you then change tty.sh.
*/
#include <sys/types.h>
#include <signal.h>
#include <fcntl.h>
#ifndef sgi
# include <sys/file.h>
#endif
#if !defined(sun) || defined(SUNOS3)
# include <sys/ioctl.h> /* collosions with termios.h */
#else
# ifndef TIOCEXCL
# include <sys/ttold.h> /* needed for TIOCEXCL */
# endif
#endif
#ifdef __hpux
# include <sys/modem.h>
#endif
#ifdef ISC
# include <sys/tty.h>
# include <sys/sioctl.h>
# include <sys/pty.h>
#endif
#include "config.h"
#ifdef HAVE_STROPTS_H
#include <sys/stropts.h> /* for I_POP */
#endif
#include "screen.h"
#include "extern.h"
#if !defined(TIOCCONS) && defined(sun) && defined(SVR4)
# include <sys/strredir.h>
#endif
extern struct display *display, *displays;
extern int iflag;
#if (!defined(TIOCCONS) && defined(SRIOCSREDIR)) || defined(linux)
extern struct win *console_window;
static void consredir_readev_fn __P((struct event *, char *));
#endif
int separate_sids = 1;
static void DoSendBreak __P((int, int, int));
static sigret_t SigAlrmDummy __P(SIGPROTOARG);
/* Frank Schulz ([email protected]):
* I have no idea why VSTART is not defined and my fix is probably not
* the cleanest, but it works.
*/
#if !defined(VSTART) && defined(_VSTART)
#define VSTART _VSTART
#endif
#if !defined(VSTOP) && defined(_VSTOP)
#define VSTOP _VSTOP
#endif
#ifndef O_NOCTTY
# define O_NOCTTY 0
#endif
#ifndef TTYVMIN
# define TTYVMIN 1
#endif
#ifndef TTYVTIME
#define TTYVTIME 0
#endif
/*
 * No-op SIGALRM handler.  Installed together with alarm() around blocking
 * tty operations (see OpenTTY() and SendBreak()) so that the alarm
 * interrupts the call with EINTR instead of terminating the process.
 */
static sigret_t
SigAlrmDummy SIGDEFARG
{
  debug("SigAlrmDummy()\n");
  SIGRETURN;
}
/*
 * Carefully open a character device. Not used to open display ttys.
 * The second parameter is parsed for a few stty style options
 * (see SttyMode() for the accepted words).
 * Returns the open file descriptor, or -1 on error (an error message
 * has then already been shown via Msg()).
 */
int
OpenTTY(line, opt)
char *line, *opt;
{
  int f;
  struct mode Mode;
  sigret_t (*sigalrm)__P(SIGPROTOARG);
  /* 2 second alarm guard: a blocking open() of a wedged device returns
   * with EINTR instead of hanging screen. */
  sigalrm = signal(SIGALRM, SigAlrmDummy);
  alarm(2);
  /* this open only succeeds, if real uid is allowed */
  if ((f = secopen(line, O_RDWR | O_NONBLOCK | O_NOCTTY, 0)) == -1)
    {
      if (errno == EINTR)
        Msg(0, "Cannot open line '%s' for R/W: open() blocked, aborted.", line);
      else
        Msg(errno, "Cannot open line '%s' for R/W", line);
      alarm(0);
      signal(SIGALRM, sigalrm);
      return -1;
    }
  if (!isatty(f))
    {
      Msg(0, "'%s' is not a tty", line);
      alarm(0);
      signal(SIGALRM, sigalrm);
      close(f);
      return -1;
    }
#if defined(I_POP) && defined(POP_TTYMODULES)
  /* Strip any pushed STREAMS modules from the line. */
  debug("OpenTTY I_POP\n");
  while (ioctl(f, I_POP, (char *)0) >= 0)
    ;
#endif
  /*
   * We come here exclusively. This is to stop all kermit and cu type things
   * accessing the same tty line.
   * Perhaps we should better create a lock in some /usr/spool/locks directory?
   */
#ifdef TIOCEXCL
 errno = 0;
 if (ioctl(f, TIOCEXCL, (char *) 0) < 0)
   Msg(errno, "%s: ioctl TIOCEXCL failed", line);
 debug3("%d %d %d\n", getuid(), geteuid(), getpid());
 debug2("%s TIOCEXCL errno %d\n", line, errno);
#endif  /* TIOCEXCL */
  /*
   * We create a sane tty mode. We do not copy things from the display tty
   */
#if WE_REALLY_WANT_TO_COPY_THE_TTY_MODE
  if (display)
    {
      debug1("OpenTTY: using mode of display for %s\n", line);
      Mode = D_NewMode;
    }
  else
#endif
    InitTTY(&Mode, W_TYPE_PLAIN);
  SttyMode(&Mode, opt);
#ifdef DEBUG
  DebugTTY(&Mode);
#endif
  SetTTY(f, &Mode);
#if defined(linux) && defined(TIOCMSET)
  /* Assert RTS on the freshly opened line. */
  {
    int mcs = 0;
    ioctl(f, TIOCMGET, &mcs);
    mcs |= TIOCM_RTS;
    ioctl(f, TIOCMSET, &mcs);
  }
#endif
  brktty(f);
  alarm(0);
  signal(SIGALRM, sigalrm);
  debug2("'%s' CONNECT fd=%d.\n", line, f);
  return f;
}
/*
 * Tty mode handling
 *
 * InitTTY() fills *m with sane, display-independent default settings.
 * A nonzero 'ttyflag' selects character-at-a-time settings (VMIN/VTIME,
 * no NL/CR mapping, no echo/canonical processing) as used for plain
 * tty windows; ttyflag == 0 yields interactive line-discipline defaults.
 *
 * NOTE: the IF{SYM} / XIF{SYM} line prefixes below are not C -- the sed
 * script in the tty.sh header rewrites them into #if defined(SYM) guards
 * (XIF additionally requires SYM < MAXCC) when tty.c is generated.
 */
void
InitTTY(m, ttyflag)
struct mode *m;
int ttyflag;
{
  bzero((char *)m, sizeof(*m));
#ifdef POSIX
  /* struct termios tio
   * defaults, as seen on SunOS 4.1.3
   */
  debug1("InitTTY: POSIX: termios defaults based on SunOS 4.1.3, but better (%d)\n", ttyflag);
IF{BRKINT}	m->tio.c_iflag |= BRKINT;
IF{IGNPAR}	m->tio.c_iflag |= IGNPAR;
/* IF{ISTRIP}	m->tio.c_iflag |= ISTRIP;  may be needed, let's try. jw. */
IF{IXON}	m->tio.c_iflag |= IXON;
/* IF{IMAXBEL}	m->tio.c_iflag |= IMAXBEL; sorry, this one is ridiculus. jw */
  if (!ttyflag)	/* may not even be good for ptys.. */
    {
IF{ICRNL}	m->tio.c_iflag |= ICRNL;
IF{ONLCR}	m->tio.c_oflag |= ONLCR;
IF{TAB3}	m->tio.c_oflag |= TAB3;
IF{OXTABS}	m->tio.c_oflag |= OXTABS;
/* IF{PARENB}	m->tio.c_cflag |= PARENB;	nah! jw. */
IF{OPOST}	m->tio.c_oflag |= OPOST;
    }
  /*
   * Or-ing the speed into c_cflags is dangerous.
   * It breaks on bsdi, where c_ispeed and c_ospeed are extra longs.
   *
   * IF{B9600} m->tio.c_cflag |= B9600;
   * IF{IBSHIFT) && defined(B9600} m->tio.c_cflag |= B9600 << IBSHIFT;
   *
   * We hope that we have the posix calls to do it right:
   * If these are not available you might try the above.
   */
IF{B9600}	cfsetospeed(&m->tio, B9600);
IF{B9600}	cfsetispeed(&m->tio, B9600);
IF{CS8}	m->tio.c_cflag |= CS8;
IF{CREAD}	m->tio.c_cflag |= CREAD;
IF{CLOCAL}	m->tio.c_cflag |= CLOCAL;
IF{ECHOCTL}	m->tio.c_lflag |= ECHOCTL;
IF{ECHOKE}	m->tio.c_lflag |= ECHOKE;
  if (!ttyflag)
    {
IF{ISIG}	m->tio.c_lflag |= ISIG;
IF{ICANON}	m->tio.c_lflag |= ICANON;
IF{ECHO}	m->tio.c_lflag |= ECHO;
    }
IF{ECHOE}	m->tio.c_lflag |= ECHOE;
IF{ECHOK}	m->tio.c_lflag |= ECHOK;
IF{IEXTEN}	m->tio.c_lflag |= IEXTEN;
XIF{VINTR}	m->tio.c_cc[VINTR] = Ctrl('C');
XIF{VQUIT}	m->tio.c_cc[VQUIT] = Ctrl('\\');
XIF{VERASE}	m->tio.c_cc[VERASE] = 0x7f; /* DEL */
XIF{VKILL}	m->tio.c_cc[VKILL] = Ctrl('H');
XIF{VEOF}	m->tio.c_cc[VEOF] = Ctrl('D');
XIF{VEOL}	m->tio.c_cc[VEOL] = 0000;
XIF{VEOL2}	m->tio.c_cc[VEOL2] = 0000;
XIF{VSWTCH}	m->tio.c_cc[VSWTCH] = 0000;
XIF{VSTART}	m->tio.c_cc[VSTART] = Ctrl('Q');
XIF{VSTOP}	m->tio.c_cc[VSTOP] = Ctrl('S');
XIF{VSUSP}	m->tio.c_cc[VSUSP] = Ctrl('Z');
XIF{VDSUSP}	m->tio.c_cc[VDSUSP] = Ctrl('Y');
XIF{VREPRINT}	m->tio.c_cc[VREPRINT] = Ctrl('R');
XIF{VDISCARD}	m->tio.c_cc[VDISCARD] = Ctrl('O');
XIF{VWERASE}	m->tio.c_cc[VWERASE] = Ctrl('W');
XIF{VLNEXT}	m->tio.c_cc[VLNEXT] = Ctrl('V');
XIF{VSTATUS}	m->tio.c_cc[VSTATUS] = Ctrl('T');
  if (ttyflag)
    {
      m->tio.c_cc[VMIN] = TTYVMIN;
      m->tio.c_cc[VTIME] = TTYVTIME;
    }
# ifdef HPUX_LTCHARS_HACK
  m->m_ltchars.t_suspc = Ctrl('Z');
  m->m_ltchars.t_dsuspc = Ctrl('Y');
  m->m_ltchars.t_rprntc = Ctrl('R');
  m->m_ltchars.t_flushc = Ctrl('O');
  m->m_ltchars.t_werasc = Ctrl('W');
  m->m_ltchars.t_lnextc = Ctrl('V');
# endif /* HPUX_LTCHARS_HACK */
#else /* POSIX */
# ifdef TERMIO
  debug1("InitTTY: nonPOSIX, struct termio a la Motorola SYSV68 (%d)\n", ttyflag);
  /* struct termio tio
   * defaults, as seen on Mototola SYSV68:
   * input: 7bit, CR->NL, ^S/^Q flow control
   * output: POSTprocessing: NL->NL-CR, Tabs to spaces
   * control: 9600baud, 8bit CSIZE, enable input
   * local: enable signals, erase/kill processing, echo on.
   */
IF{ISTRIP}	m->tio.c_iflag |= ISTRIP;
IF{IXON}	m->tio.c_iflag |= IXON;
  if (!ttyflag)	/* may not even be good for ptys.. */
    {
IF{OPOST}	m->tio.c_oflag |= OPOST;
IF{ICRNL}	m->tio.c_iflag |= ICRNL;
IF{ONLCR}	m->tio.c_oflag |= ONLCR;
IF{TAB3}	m->tio.c_oflag |= TAB3;
    }
#ifdef __bsdi__
	)-: cannot handle BSDI without POSIX
#else
IF{B9600}	m->tio.c_cflag = B9600;
#endif
IF{CS8}	m->tio.c_cflag |= CS8;
IF{CREAD}	m->tio.c_cflag |= CREAD;
  if (!ttyflag)
    {
IF{ISIG}	m->tio.c_lflag |= ISIG;
IF{ICANON}	m->tio.c_lflag |= ICANON;
IF{ECHO}	m->tio.c_lflag |= ECHO;
    }
IF{ECHOE}	m->tio.c_lflag |= ECHOE;
IF{ECHOK}	m->tio.c_lflag |= ECHOK;
XIF{VINTR}	m->tio.c_cc[VINTR] = Ctrl('C');
XIF{VQUIT}	m->tio.c_cc[VQUIT] = Ctrl('\\');
XIF{VERASE}	m->tio.c_cc[VERASE] = 0177; /* DEL */
XIF{VKILL}	m->tio.c_cc[VKILL] = Ctrl('H');
XIF{VEOF}	m->tio.c_cc[VEOF] = Ctrl('D');
XIF{VEOL}	m->tio.c_cc[VEOL] = 0377;
XIF{VEOL2}	m->tio.c_cc[VEOL2] = 0377;
XIF{VSWTCH}	m->tio.c_cc[VSWTCH] = 0000;
  if (ttyflag)
    {
      m->tio.c_cc[VMIN] = TTYVMIN;
      m->tio.c_cc[VTIME] = TTYVTIME;
    }
# else /* TERMIO */
  debug1("InitTTY: BSD: defaults a la SunOS 4.1.3 (%d)\n", ttyflag);
  m->m_ttyb.sg_ispeed = B9600;
  m->m_ttyb.sg_ospeed = B9600;
  m->m_ttyb.sg_erase = 0177; /*DEL */
  m->m_ttyb.sg_kill = Ctrl('H');
  if (!ttyflag)
    m->m_ttyb.sg_flags = CRMOD | ECHO
IF{ANYP}	| ANYP
	;
  else
    m->m_ttyb.sg_flags = CBREAK
IF{ANYP}	| ANYP
	;
  m->m_tchars.t_intrc = Ctrl('C');
  m->m_tchars.t_quitc = Ctrl('\\');
  m->m_tchars.t_startc = Ctrl('Q');
  m->m_tchars.t_stopc = Ctrl('S');
  m->m_tchars.t_eofc = Ctrl('D');
  m->m_tchars.t_brkc = -1;
  m->m_ltchars.t_suspc = Ctrl('Z');
  m->m_ltchars.t_dsuspc = Ctrl('Y');
  m->m_ltchars.t_rprntc = Ctrl('R');
  m->m_ltchars.t_flushc = Ctrl('O');
  m->m_ltchars.t_werasc = Ctrl('W');
  m->m_ltchars.t_lnextc = Ctrl('V');
IF{NTTYDISC}	m->m_ldisc = NTTYDISC;
  m->m_lmode = 0
IF{LDECCTQ}	| LDECCTQ
IF{LCTLECH}	| LCTLECH
IF{LPASS8}	| LPASS8
IF{LCRTKIL}	| LCRTKIL
IF{LCRTERA}	| LCRTERA
IF{LCRTBS}	| LCRTBS
	;
# endif /* TERMIO */
#endif /* POSIX */
#if defined(ENCODINGS) && defined(TIOCKSET)
  /* Kanji/ASCII handling defaults for kernels with TIOCKSET support. */
  m->m_jtchars.t_ascii = 'J';
  m->m_jtchars.t_kanji = 'B';
  m->m_knjmode = KM_ASCII | KM_SYSSJIS;
#endif
}
/*
 * Write the complete terminal mode *mp to file descriptor fd, using
 * whichever interface this platform provides (POSIX termios, SYSV
 * termio ioctls, or old-BSD sgtty ioctls).  A single Msg() is issued
 * if any of the underlying calls left errno set.
 */
void
SetTTY(fd, mp)
int fd;
struct mode *mp;
{
  errno = 0;
#ifdef POSIX
  tcsetattr(fd, TCSADRAIN, &mp->tio);
# ifdef HPUX_LTCHARS_HACK
  ioctl(fd, TIOCSLTC, (char *)&mp->m_ltchars);
# endif
#else
# ifdef TERMIO
  ioctl(fd, TCSETAW, (char *)&mp->tio);
# ifdef CYTERMIO
  if (mp->tio.c_line == 3)
    {
      ioctl(fd, LDSETMAPKEY, (char *)&mp->m_mapkey);
      ioctl(fd, LDSETMAPSCREEN, (char *)&mp->m_mapscreen);
      ioctl(fd, LDSETBACKSPACE, (char *)&mp->m_backspace);
    }
# endif
# else
  /* ioctl(fd, TIOCSETP, (char *)&mp->m_ttyb); */
  ioctl(fd, TIOCSETC, (char *)&mp->m_tchars);
  ioctl(fd, TIOCLSET, (char *)&mp->m_lmode);
  ioctl(fd, TIOCSETD, (char *)&mp->m_ldisc);
  ioctl(fd, TIOCSETP, (char *)&mp->m_ttyb);
  ioctl(fd, TIOCSLTC, (char *)&mp->m_ltchars); /* moved here for apollo. jw */
# endif
#endif
#if defined(ENCODINGS) && defined(TIOCKSET)
  ioctl(fd, TIOCKSETC, &mp->m_jtchars);
  ioctl(fd, TIOCKSET, &mp->m_knjmode);
#endif
  if (errno)
    Msg(errno, "SetTTY (fd %d): ioctl failed", fd);
}
/*
 * Read the current terminal mode of fd into *mp -- the counterpart of
 * SetTTY(), covering the same three platform interfaces.  A single
 * Msg() is issued if any of the underlying calls left errno set.
 */
void
GetTTY(fd, mp)
int fd;
struct mode *mp;
{
  errno = 0;
#ifdef POSIX
  tcgetattr(fd, &mp->tio);
# ifdef HPUX_LTCHARS_HACK
  ioctl(fd, TIOCGLTC, (char *)&mp->m_ltchars);
# endif
#else
# ifdef TERMIO
  ioctl(fd, TCGETA, (char *)&mp->tio);
# ifdef CYTERMIO
  if (mp->tio.c_line == 3)
    {
      ioctl(fd, LDGETMAPKEY, (char *)&mp->m_mapkey);
      ioctl(fd, LDGETMAPSCREEN, (char *)&mp->m_mapscreen);
      ioctl(fd, LDGETBACKSPACE, (char *)&mp->m_backspace);
    }
  else
    {
      mp->m_mapkey = NOMAPKEY;
      mp->m_mapscreen = NOMAPSCREEN;
      mp->m_backspace = '\b';
    }
# endif
# else
  ioctl(fd, TIOCGETP, (char *)&mp->m_ttyb);
  ioctl(fd, TIOCGETC, (char *)&mp->m_tchars);
  ioctl(fd, TIOCGLTC, (char *)&mp->m_ltchars);
  ioctl(fd, TIOCLGET, (char *)&mp->m_lmode);
  ioctl(fd, TIOCGETD, (char *)&mp->m_ldisc);
# endif
#endif
#if defined(ENCODINGS) && defined(TIOCKSET)
  ioctl(fd, TIOCKGETC, &mp->m_jtchars);
  ioctl(fd, TIOCKGET, &mp->m_knjmode);
#endif
  if (errno)
    Msg(errno, "GetTTY (fd %d): ioctl failed", fd);
}
/*
 * Derive the raw mode *np (what screen actually runs the user's tty in)
 * from the saved mode *op: disable canonical processing, echo and most
 * special characters, optionally keeping SIGINT generation and XON/XOFF.
 * needs interrupt = iflag and flow = d->d_flow
 */
void
SetMode(op, np, flow, interrupt)
struct mode *op, *np;
int flow, interrupt;
{
  *np = *op;
  ASSERT(display);
#if defined(TERMIO) || defined(POSIX)
# ifdef CYTERMIO
  np->m_mapkey = NOMAPKEY;
  np->m_mapscreen = NOMAPSCREEN;
  np->tio.c_line = 0;
# endif
IF{ICRNL}	np->tio.c_iflag &= ~ICRNL;
IF{ISTRIP}	np->tio.c_iflag &= ~ISTRIP;
IF{ONLCR}	np->tio.c_oflag &= ~ONLCR;
  np->tio.c_lflag &= ~(ICANON | ECHO);
  /*
   * From Andrew Myers (andru@tonic.lcs.mit.edu)
   * to avoid ^V^V-Problem on OSF1
   */
IF{IEXTEN}	np->tio.c_lflag &= ~IEXTEN;
  /*
   * Unfortunately, the master process never will get SIGINT if the real
   * terminal is different from the one on which it was originaly started
   * (process group membership has not been restored or the new tty could not
   * be made controlling again). In my solution, it is the attacher who
   * receives SIGINT (because it is always correctly associated with the real
   * tty) and forwards it to the master [kill(MasterPid, SIGINT)].
   * Marc Boucher (marc@CAM.ORG)
   */
  if (interrupt)
    np->tio.c_lflag |= ISIG;
  else
    np->tio.c_lflag &= ~ISIG;
  /*
   * careful, careful catche monkey..
   * never set VMIN and VTIME to zero, if you want blocking io.
   *
   * We may want to do a VMIN > 0, VTIME > 0 read on the ptys too, to
   * reduce interrupt frequency. But then we would not know how to
   * handle read returning 0. jw.
   */
  np->tio.c_cc[VMIN] = 1;
  np->tio.c_cc[VTIME] = 0;
  if (!interrupt || !flow)
    np->tio.c_cc[VINTR] = VDISABLE;
  np->tio.c_cc[VQUIT] = VDISABLE;
  if (flow == 0)
    {
XIF{VSTART}	np->tio.c_cc[VSTART] = VDISABLE;
XIF{VSTOP}	np->tio.c_cc[VSTOP] = VDISABLE;
      np->tio.c_iflag &= ~IXON;
    }
XIF{VDISCARD}	np->tio.c_cc[VDISCARD] = VDISABLE;
XIF{VLNEXT}	np->tio.c_cc[VLNEXT] = VDISABLE;
XIF{VSTATUS}	np->tio.c_cc[VSTATUS] = VDISABLE;
XIF{VSUSP}	np->tio.c_cc[VSUSP] = VDISABLE;
 /* Set VERASE to DEL, rather than VDISABLE, to avoid libvte
    "autodetect" issues. */
XIF{VERASE}	np->tio.c_cc[VERASE] = 0x7f;
XIF{VKILL}	np->tio.c_cc[VKILL] = VDISABLE;
# ifdef HPUX_LTCHARS_HACK
  np->m_ltchars.t_suspc = VDISABLE;
  np->m_ltchars.t_dsuspc = VDISABLE;
  np->m_ltchars.t_rprntc = VDISABLE;
  np->m_ltchars.t_flushc = VDISABLE;
  np->m_ltchars.t_werasc = VDISABLE;
  np->m_ltchars.t_lnextc = VDISABLE;
# else /* HPUX_LTCHARS_HACK */
XIF{VDSUSP}	np->tio.c_cc[VDSUSP] = VDISABLE;
XIF{VREPRINT}	np->tio.c_cc[VREPRINT] = VDISABLE;
XIF{VWERASE}	np->tio.c_cc[VWERASE] = VDISABLE;
# endif /* HPUX_LTCHARS_HACK */
#else /* TERMIO || POSIX */
  if (!interrupt || !flow)
    np->m_tchars.t_intrc = -1;
  np->m_ttyb.sg_flags &= ~(CRMOD | ECHO);
  np->m_ttyb.sg_flags |= CBREAK;
# if defined(CYRILL) && defined(CSTYLE) && defined(CS_8BITS)
  np->m_ttyb.sg_flags &= ~CSTYLE;
  np->m_ttyb.sg_flags |= CS_8BITS;
# endif
  np->m_tchars.t_quitc = -1;
  if (flow == 0)
    {
      np->m_tchars.t_startc = -1;
      np->m_tchars.t_stopc = -1;
    }
  np->m_ltchars.t_suspc = -1;
  np->m_ltchars.t_dsuspc = -1;
  np->m_ltchars.t_flushc = -1;
  np->m_ltchars.t_lnextc = -1;
#endif /* defined(TERMIO) || defined(POSIX) */
}
/* operates on display:
 * toggle XON/XOFF flow control (and the interrupt character, when iflag
 * is set) on the current display's tty.  Updates D_NewMode from the
 * saved D_OldMode and applies it to D_userfd; no-op when D_flow already
 * matches 'on'. */
void
SetFlow(on)
int on;
{
  ASSERT(display);
  if (D_flow == on)
    return;
#if defined(TERMIO) || defined(POSIX)
  if (on)
    {
      D_NewMode.tio.c_cc[VINTR] = iflag ? D_OldMode.tio.c_cc[VINTR] : VDISABLE;
XIF{VSTART}	D_NewMode.tio.c_cc[VSTART] = D_OldMode.tio.c_cc[VSTART];
XIF{VSTOP}	D_NewMode.tio.c_cc[VSTOP] = D_OldMode.tio.c_cc[VSTOP];
      D_NewMode.tio.c_iflag |= D_OldMode.tio.c_iflag & IXON;
    }
  else
    {
      D_NewMode.tio.c_cc[VINTR] = VDISABLE;
XIF{VSTART}	D_NewMode.tio.c_cc[VSTART] = VDISABLE;
XIF{VSTOP}	D_NewMode.tio.c_cc[VSTOP] = VDISABLE;
      D_NewMode.tio.c_iflag &= ~IXON;
    }
# ifdef POSIX
# ifdef TCOON
  /* Restart any output that a previous XOFF may have stopped. */
  if (!on)
    tcflow(D_userfd, TCOON);
# endif
  if (tcsetattr(D_userfd, TCSANOW, &D_NewMode.tio))
# else
  if (ioctl(D_userfd, TCSETAW, (char *)&D_NewMode.tio) != 0)
# endif
    debug1("SetFlow: ioctl errno %d\n", errno);
#else /* POSIX || TERMIO */
  if (on)
    {
      D_NewMode.m_tchars.t_intrc = iflag ? D_OldMode.m_tchars.t_intrc : -1;
      D_NewMode.m_tchars.t_startc = D_OldMode.m_tchars.t_startc;
      D_NewMode.m_tchars.t_stopc = D_OldMode.m_tchars.t_stopc;
    }
  else
    {
      D_NewMode.m_tchars.t_intrc = -1;
      D_NewMode.m_tchars.t_startc = -1;
      D_NewMode.m_tchars.t_stopc = -1;
    }
  if (ioctl(D_userfd, TIOCSETC, (char *)&D_NewMode.m_tchars) != 0)
    debug1("SetFlow: ioctl errno %d\n", errno);
#endif /* POSIX || TERMIO */
  D_flow = on;
}
/* parse commands from opt and modify m.
 * Option words are separated by space, tab, ':', ';' or ','.
 * A leading number sets the baud rate via SetBaud(); recognized words:
 * cs7, cs8, [-]istrip, [-]ixon, [-]ixoff, [-]crtscts.
 * Returns 0 on success, -1 on an unknown word or a bad baud rate. */
int
SttyMode(m, opt)
struct mode *m;
char *opt;
{
  static const char sep[] = " \t:;,";
  if (!opt)
    return 0;
  while (*opt)
    {
      while (index(sep, *opt)) opt++;
      if (*opt >= '0' && *opt <= '9')
        {
          if (SetBaud(m, atoi(opt), atoi(opt)))
            return -1;
        }
      else if (!strncmp("cs7", opt, 3))
        {
#if defined(POSIX) || defined(TERMIO)
          m->tio.c_cflag &= ~CSIZE;
          m->tio.c_cflag |= CS7;
#else
          m->m_lmode &= ~LPASS8;
#endif
        }
      else if (!strncmp("cs8", opt, 3))
        {
#if defined(POSIX) || defined(TERMIO)
          m->tio.c_cflag &= ~CSIZE;
          m->tio.c_cflag |= CS8;
#else
          m->m_lmode |= LPASS8;
#endif
        }
      else if (!strncmp("istrip", opt, 6))
        {
#if defined(POSIX) || defined(TERMIO)
          m->tio.c_iflag |= ISTRIP;
#else
          m->m_lmode &= ~LPASS8;
#endif
        }
      else if (!strncmp("-istrip", opt, 7))
        {
#if defined(POSIX) || defined(TERMIO)
          m->tio.c_iflag &= ~ISTRIP;
#else
          m->m_lmode |= LPASS8;
#endif
        }
      else if (!strncmp("ixon", opt, 4))
        {
#if defined(POSIX) || defined(TERMIO)
          m->tio.c_iflag |= IXON;
#else
          debug("SttyMode: no ixon in old bsd land.\n");
#endif
        }
      else if (!strncmp("-ixon", opt, 5))
        {
#if defined(POSIX) || defined(TERMIO)
          m->tio.c_iflag &= ~IXON;
#else
          debug("SttyMode: no -ixon in old bsd land.\n");
#endif
        }
      else if (!strncmp("ixoff", opt, 5))
        {
#if defined(POSIX) || defined(TERMIO)
          m->tio.c_iflag |= IXOFF;
#else
          m->m_ttyb.sg_flags |= TANDEM;
#endif
        }
      else if (!strncmp("-ixoff", opt, 6))
        {
#if defined(POSIX) || defined(TERMIO)
          m->tio.c_iflag &= ~IXOFF;
#else
          m->m_ttyb.sg_flags &= ~TANDEM;
#endif
        }
      else if (!strncmp("crtscts", opt, 7))
        {
#if (defined(POSIX) || defined(TERMIO)) && defined(CRTSCTS)
          m->tio.c_cflag |= CRTSCTS;
#endif
        }
      else if (!strncmp("-crtscts", opt, 8))
        {
#if (defined(POSIX) || defined(TERMIO)) && defined(CRTSCTS)
          m->tio.c_cflag &= ~CRTSCTS;
#endif
        }
      else
        return -1;
      while (*opt && !index(sep, *opt)) opt++;
    }
  return 0;
}
/*
 * Job control handling
 *
 * Somehow the ultrix session handling is broken, so use
 * the bsdish variant.
 *
 * brktty(): detach from the controlling terminal.  POSIX systems use
 * setsid() (plus TIOCSCTTY on BSD to adopt fd as the new controlling
 * tty); SYSV uses setpgrp(); old BSD does an explicit TIOCNOTTY on
 * /dev/tty.  The session separation is skipped when the global
 * 'separate_sids' is zero.
 */
/*ARGSUSED*/
void
brktty(fd)
int fd;
{
#if defined(POSIX) && !defined(ultrix)
  if (separate_sids)
    setsid(); /* will break terminal affiliation */
  /* GNU added for Hurd systems 2001-10-10 */
# if defined(BSD) && defined(TIOCSCTTY) && !defined(__GNU__)
  ioctl(fd, TIOCSCTTY, (char *)0);
# endif /* BSD && TIOCSCTTY */
#else /* POSIX */
# ifdef SYSV
  if (separate_sids)
    setpgrp(); /* will break terminal affiliation */
# else /* SYSV */
# ifdef BSDJOBS
  int devtty;
  if ((devtty = open("/dev/tty", O_RDWR | O_NONBLOCK)) >= 0)
    {
      if (ioctl(devtty, TIOCNOTTY, (char *)0))
        debug2("brktty: ioctl(devtty=%d, TIOCNOTTY, 0) = %d\n", devtty, errno);
      close(devtty);
    }
# endif /* BSDJOBS */
# endif /* SYSV */
#endif /* POSIX */
}
/*
 * fgtty(): make this process the foreground process group of tty 'fd'
 * (via tcsetpgrp()/TIOCSPGRP).  Respects 'separate_sids' like brktty().
 * Returns 0 on success, -1 when tcsetpgrp() fails.
 */
int
fgtty(fd)
int fd;
{
#ifdef BSDJOBS
  int mypid;
  mypid = getpid();
  /* The next lines should be obsolete. Can anybody check if they
   * are really needed on the BSD platforms?
   *
   * this is to avoid the message:
   *	fgtty: Not a typewriter (25)
   */
# if defined(__osf__) || (BSD >= 199103) || defined(ISC)
  if (separate_sids)
    setsid(); /* should be already done */
# ifdef TIOCSCTTY
  ioctl(fd, TIOCSCTTY, (char *)0);
# endif
# endif
# ifdef POSIX
  if (separate_sids)
    if (tcsetpgrp(fd, mypid))
      {
        debug1("fgtty: tcsetpgrp: %d\n", errno);
        return -1;
      }
# else /* POSIX */
  if (ioctl(fd, TIOCSPGRP, (char *)&mypid) != 0)
    debug1("fgtty: TIOSETPGRP: %d\n", errno);
# ifndef SYSV /* Already done in brktty():setpgrp() */
  if (separate_sids)
    if (setpgrp(fd, mypid))
      debug1("fgtty: setpgrp: %d\n", errno);
# endif
# endif /* POSIX */
#endif /* BSDJOBS */
  return 0;
}
/*
* The alm boards on our sparc center 1000 have a lousy driver.
* We cannot generate long breaks unless we use the most ugly form
* of ioctls. jw.
*/
#ifdef POSIX
int breaktype = 2;
#else /* POSIX */
# ifdef TCSBRK
int breaktype = 1;
# else
int breaktype = 0;
# endif
#endif /* POSIX */
#if defined(sun) && !defined(SVR4)
# define HAVE_SUPER_TCSENDBREAK
#endif
/*
 * Issue a break on fd using the requested method.
 * type:
 * 0: TIOCSBRK / TIOCCBRK
 * 1: TCSBRK
 * 2: tcsendbreak()
 * n: approximate duration in 1/4 seconds.
 * Errors are reported via Msg(); there is no return value.
 */
static void
DoSendBreak(fd, n, type)
int fd, n, type;
{
  switch (type)
    {
    case 2: /* tcsendbreak() =============================== */
#ifdef POSIX
# ifdef HAVE_SUPER_TCSENDBREAK
      /* There is one rare case that I have tested, where tcsendbreak works
       * really great: this was an alm driver that came with SunOS 4.1.3
       * If you have this one, define the above symbol.
       * here we can use the second parameter to specify the duration.
       */
      debug2("tcsendbreak(fd=%d, %d)\n", fd, n);
      if (tcsendbreak(fd, n) < 0)
        Msg(errno, "cannot send BREAK (tcsendbreak)");
# else
      /*
       * here we hope, that multiple calls to tcsendbreak() can
       * be concatenated to form a long break, as we do not know
       * what exact interpretation the second parameter has:
       *
       * - sunos 4: duration in quarter seconds
       * - sunos 5: 0 a short break, nonzero a tcdrain()
       * - hpux, irix: ignored
       * - mot88: duration in milliseconds
       * - aix: duration in milliseconds, but 0 is 25 milliseconds.
       */
      debug2("%d * tcsendbreak(fd=%d, 0)\n", n, fd);
      {
        int i;
        if (!n)
          n++;
        for (i = 0; i < n; i++)
          if (tcsendbreak(fd, 0) < 0)
            {
              Msg(errno, "cannot send BREAK (tcsendbreak SVR4)");
              return;
            }
      }
# endif
#else /* POSIX */
      Msg(0, "tcsendbreak() not available, change breaktype");
#endif /* POSIX */
      break;
    case 1: /* TCSBRK ======================================= */
#ifdef TCSBRK
      if (!n)
        n++;
      /*
       * Here too, we assume that short breaks can be concatenated to
       * perform long breaks. But for SOLARIS, this is not true, of course.
       */
      debug2("%d * TCSBRK fd=%d\n", n, fd);
      {
        int i;
        for (i = 0; i < n; i++)
          if (ioctl(fd, TCSBRK, (char *)0) < 0)
            {
              Msg(errno, "Cannot send BREAK (TCSBRK)");
              return;
            }
      }
#else /* TCSBRK */
      Msg(0, "TCSBRK not available, change breaktype");
#endif /* TCSBRK */
      break;
    case 0: /* TIOCSBRK / TIOCCBRK ========================== */
#if defined(TIOCSBRK) && defined(TIOCCBRK)
      /*
       * This is very rude. Screen actively celebrates the break.
       * But it may be the only save way to issue long breaks.
       */
      debug("TIOCSBRK TIOCCBRK\n");
      if (ioctl(fd, TIOCSBRK, (char *)0) < 0)
        {
          Msg(errno, "Can't send BREAK (TIOCSBRK)");
          return;
        }
      sleep1000(n ? n * 250 : 250);
      if (ioctl(fd, TIOCCBRK, (char *)0) < 0)
        {
          Msg(errno, "BREAK stuck!!! -- HELP! (TIOCCBRK)");
          return;
        }
#else /* TIOCSBRK && TIOCCBRK */
      Msg(0, "TIOCSBRK/CBRK not available, change breaktype");
#endif /* TIOCSBRK && TIOCCBRK */
      break;
    default: /* unknown ========================== */
      Msg(0, "Internal SendBreak error: method %d unknown", type);
    }
}
/*
 * Send a break for n * 0.25 seconds. Tty must be PLAIN.
 * The longest possible break allowed here is 15 seconds.
 * If 'closeopen' is nonzero the break is simulated by closing and
 * reopening the window's line instead; otherwise DoSendBreak() runs
 * with the global 'breaktype' under a 15 second SIGALRM guard.
 */
void
SendBreak(wp, n, closeopen)
struct win *wp;
int n, closeopen;
{
  sigret_t (*sigalrm)__P(SIGPROTOARG);
#ifdef BUILTIN_TELNET
  if (wp->w_type == W_TYPE_TELNET)
    {
      TelBreak(wp);
      return;
    }
#endif
  if (wp->w_type != W_TYPE_PLAIN)
    return;
  debug3("break(%d, %d) fd %d\n", n, closeopen, wp->w_ptyfd);
#ifdef POSIX
  /* Discard pending input and output before breaking. */
  (void) tcflush(wp->w_ptyfd, TCIOFLUSH);
#else
# ifdef TIOCFLUSH
  (void) ioctl(wp->w_ptyfd, TIOCFLUSH, (char *)0);
# endif /* TIOCFLUSH */
#endif /* POSIX */
  if (closeopen)
    {
      close(wp->w_ptyfd);
      sleep1000(n ? n * 250 : 250);
      if ((wp->w_ptyfd = OpenTTY(wp->w_tty, wp->w_cmdargs[1])) < 1)
        {
          Msg(0, "Ouch, cannot reopen line %s, please try harder", wp->w_tty);
          return;
        }
      (void) fcntl(wp->w_ptyfd, F_SETFL, FNBLOCK);
    }
  else
    {
      sigalrm = signal(SIGALRM, SigAlrmDummy);
      alarm(15);
      DoSendBreak(wp->w_ptyfd, n, breaktype);
      alarm(0);
      signal(SIGALRM, sigalrm);
    }
  debug(" broken.\n");
}
/*
* Console grabbing
*/
#if (!defined(TIOCCONS) && defined(SRIOCSREDIR)) || defined(linux)
static struct event consredir_ev;
static int consredirfd[2] = {-1, -1};
static void
consredir_readev_fn(ev, data)
struct event *ev;
char *data;
{
char *p, *n, buf[256];
int l;
if (!console_window || (l = read(consredirfd[0], buf, sizeof(buf))) <= 0)
{
close(consredirfd[0]);
close(consredirfd[1]);
consredirfd[0] = consredirfd[1] = -1;
evdeq(ev);
return;
}
for (p = n = buf; l > 0; n++, l--)
if (*n == '\n')
{
if (n > p)
WriteString(console_window, p, n - p);
WriteString(console_window, "\r\n", 2);
p = n + 1;
}
if (n > p)
WriteString(console_window, p, n - p);
}
#endif
/*ARGSUSED*/
int
TtyGrabConsole(fd, on, rc_name)
int fd, on;
char *rc_name;
{
#if defined(TIOCCONS) && !defined(linux)
struct display *d;
int ret = 0;
int sfd = -1;
if (on < 0)
return 0; /* pty close will ungrab */
if (on)
{
if (displays == 0)
{
Msg(0, "I need a display");
return -1;
}
for (d = displays; d; d = d->d_next)
if (strcmp(d->d_usertty, "/dev/console") == 0)
break;
if (d)
{
Msg(0, "too dangerous - screen is running on /dev/console");
return -1;
}
}
if (!on)
{
char *slave;
if ((fd = OpenPTY(&slave)) < 0)
{
Msg(errno, "%s: could not open detach pty master", rc_name);
return -1;
}
if ((sfd = open(slave, O_RDWR | O_NOCTTY)) < 0)
{
Msg(errno, "%s: could not open detach pty slave", rc_name);
close(fd);
return -1;
}
}
if (UserContext() == 1)
UserReturn(ioctl(fd, TIOCCONS, (char *)&on));
ret = UserStatus();
if (ret)
Msg(errno, "%s: ioctl TIOCCONS failed", rc_name);
if (!on)
{
close(sfd);
close(fd);
}
return ret;
#else
# if defined(SRIOCSREDIR) || defined(linux)
struct display *d;
# ifdef SRIOCSREDIR
int cfd;
# else
struct mode new1, new2;
char *slave;
# endif
if (on > 0)
{
if (displays == 0)
{
Msg(0, "I need a display");
return -1;
}
for (d = displays; d; d = d->d_next)
if (strcmp(d->d_usertty, "/dev/console") == 0)
break;
if (d)
{
Msg(0, "too dangerous - screen is running on /dev/console");
return -1;
}
}
if (consredirfd[0] >= 0)
{
evdeq(&consredir_ev);
close(consredirfd[0]);
close(consredirfd[1]);
consredirfd[0] = consredirfd[1] = -1;
}
if (on <= 0)
return 0;
# ifdef SRIOCSREDIR
if ((cfd = secopen("/dev/console", O_RDWR|O_NOCTTY, 0)) == -1)
{
Msg(errno, "/dev/console");
return -1;
}
if (pipe(consredirfd))
{
Msg(errno, "pipe");
close(cfd);
consredirfd[0] = consredirfd[1] = -1;
return -1;
}
if (ioctl(cfd, SRIOCSREDIR, consredirfd[1]))
{
Msg(errno, "SRIOCSREDIR ioctl");
close(cfd);
close(consredirfd[0]);
close(consredirfd[1]);
consredirfd[0] = consredirfd[1] = -1;
return -1;
}
close(cfd);
# else
/* special linux workaround for a too restrictive kernel */
if ((consredirfd[0] = OpenPTY(&slave)) < 0)
{
Msg(errno, "%s: could not open detach pty master", rc_name);
return -1;
}
if ((consredirfd[1] = open(slave, O_RDWR | O_NOCTTY)) < 0)
{
Msg(errno, "%s: could not open detach pty slave", rc_name);
close(consredirfd[0]);
return -1;
}
InitTTY(&new1, 0);
SetMode(&new1, &new2, 0, 0);
SetTTY(consredirfd[1], &new2);
if (UserContext() == 1)
UserReturn(ioctl(consredirfd[1], TIOCCONS, (char *)&on));
if (UserStatus())
{
Msg(errno, "%s: ioctl TIOCCONS failed", rc_name);
close(consredirfd[0]);
close(consredirfd[1]);
return -1;
}
# endif
consredir_ev.fd = consredirfd[0];
consredir_ev.type = EV_READ;
consredir_ev.handler = consredir_readev_fn;
evenq(&consredir_ev);
return 0;
# else
if (on > 0)
Msg(0, "%s: don't know how to grab the console", rc_name);
return -1;
# endif
#endif
}
/*
* Read modem control lines of a physical tty and write them to buf
* in a readable format.
* Will not write more than 256 characters to buf.
* Returns buf;
*/
char *
TtyGetModemStatus(fd, buf)
int fd;
char *buf;
{
char *p = buf;
#ifdef TIOCGSOFTCAR
unsigned int softcar;
#endif
#if defined(TIOCMGET) || defined(TIOCMODG)
unsigned int mflags;
#else
# ifdef MCGETA
/* this is yet another interface, found on hpux. grrr */
mflag mflags;
IF{MDTR}# define TIOCM_DTR MDTR
IF{MRTS}# define TIOCM_RTS MRTS
IF{MDSR}# define TIOCM_DSR MDSR
IF{MDCD}# define TIOCM_CAR MDCD
IF{MRI}# define TIOCM_RNG MRI
IF{MCTS}# define TIOCM_CTS MCTS
# endif
#endif
#if defined(CLOCAL) || defined(CRTSCTS)
struct mode mtio; /* screen.h */
#endif
#if defined(CRTSCTS) || defined(TIOCM_CTS)
int rtscts;
#endif
int clocal;
#if defined(CLOCAL) || defined(CRTSCTS)
GetTTY(fd, &mtio);
#endif
clocal = 0;
#ifdef CLOCAL
if (mtio.tio.c_cflag & CLOCAL)
{
clocal = 1;
*p++ = '{';
}
#endif
#ifdef TIOCM_CTS
# ifdef CRTSCTS
if (!(mtio.tio.c_cflag & CRTSCTS))
rtscts = 0;
else
# endif /* CRTSCTS */
rtscts = 1;
#endif /* TIOCM_CTS */
#ifdef TIOCGSOFTCAR
if (ioctl(fd, TIOCGSOFTCAR, (char *)&softcar) < 0)
softcar = 0;
#endif
#if defined(TIOCMGET) || defined(TIOCMODG) || defined(MCGETA)
# ifdef TIOCMGET
if (ioctl(fd, TIOCMGET, (char *)&mflags) < 0)
# else
# ifdef TIOCMODG
if (ioctl(fd, TIOCMODG, (char *)&mflags) < 0)
# else
if (ioctl(fd, MCGETA, &mflags) < 0)
# endif
# endif
{
#ifdef TIOCGSOFTCAR
sprintf(p, "NO-TTY? %s", softcar ? "(CD)" : "CD");
#else
sprintf(p, "NO-TTY?");
#endif
p += strlen(p);
}
else
{
char *s;
# ifdef FANCY_MODEM
# ifdef TIOCM_LE
if (!(mflags & TIOCM_LE))
for (s = "!LE "; *s; *p++ = *s++);
# endif
# endif /* FANCY_MODEM */
# ifdef TIOCM_RTS
s = "!RTS "; if (mflags & TIOCM_RTS) s++;
while (*s) *p++ = *s++;
# endif
# ifdef TIOCM_CTS
s = "!CTS ";
if (!rtscts)
{
*p++ = '(';
s = "!CTS) ";
}
if (mflags & TIOCM_CTS) s++;
while (*s) *p++ = *s++;
# endif
# ifdef TIOCM_DTR
s = "!DTR "; if (mflags & TIOCM_DTR) s++;
while (*s) *p++ = *s++;
# endif
# ifdef TIOCM_DSR
s = "!DSR "; if (mflags & TIOCM_DSR) s++;
while (*s) *p++ = *s++;
# endif
# if defined(TIOCM_CD) || defined(TIOCM_CAR)
s = "!CD ";
# ifdef TIOCGSOFTCAR
if (softcar)
{
*p++ = '(';
s = "!CD) ";
}
# endif
# ifdef TIOCM_CD
if (mflags & TIOCM_CD) s++;
# else
if (mflags & TIOCM_CAR) s++;
# endif
while (*s) *p++ = *s++;
# endif
# if defined(TIOCM_RI) || defined(TIOCM_RNG)
# ifdef TIOCM_RI
if (mflags & TIOCM_RI)
# else
if (mflags & TIOCM_RNG)
# endif
for (s = "RI "; *s; *p++ = *s++);
# endif
# ifdef FANCY_MODEM
# ifdef TIOCM_ST
s = "!ST "; if (mflags & TIOCM_ST) s++;
while (*s) *p++ = *s++;
# endif
# ifdef TIOCM_SR
s = "!SR "; if (mflags & TIOCM_SR) s++;
while (*s) *p++ = *s++;
# endif
# endif /* FANCY_MODEM */
if (p > buf && p[-1] == ' ')
p--;
*p = '\0';
}
#else
# ifdef TIOCGSOFTCAR
sprintf(p, " %s", softcar ? "(CD)", "CD");
p += strlen(p);
# endif
#endif
if (clocal)
*p++ = '}';
*p = '\0';
return buf;
}
/*
* Old bsd-ish machines may not have any of the baudrate B... symbols.
* We hope to detect them here, so that the btable[] below always has
* many entries.
*/
#ifndef POSIX
# ifndef TERMIO
# if !defined(B9600) && !defined(B2400) && !defined(B1200) && !defined(B300)
IFN{B0}#define B0 0
IFN{B50}#define B50 1
IFN{B75}#define B75 2
IFN{B110}#define B110 3
IFN{B134}#define B134 4
IFN{B150}#define B150 5
IFN{B200}#define B200 6
IFN{B300}#define B300 7
IFN{B600}#define B600 8
IFN{B1200}#define B1200 9
IFN{B1800}#define B1800 10
IFN{B2400}#define B2400 11
IFN{B4800}#define B4800 12
IFN{B9600}#define B9600 13
IFN{EXTA}#define EXTA 14
IFN{EXTB}#define EXTB 15
# endif
# endif
#endif
/*
* On hpux, idx and sym will be different.
* Rumor has it that, we need idx in D_dospeed to make tputs
* padding correct.
* Frequently used entries come first.
*/
static struct baud_values btable[] =
{
IF{B9600} { 13, 9600, B9600 },
IF{B19200} { 14, 19200, B19200 },
IF{EXTA} { 14, 19200, EXTA },
IF{B38400} { 15, 38400, B38400 },
IF{EXTB} { 15, 38400, EXTB },
IF{B57600} { 16, 57600, B57600 },
IF{B115200} { 17, 115200, B115200 },
IF{B230400} { 18, 230400, B230400 },
IF{B460800} { 19, 460800, B460800 },
IF{B7200} { 13, 7200, B7200 },
IF{B4800} { 12, 4800, B4800 },
IF{B3600} { 12, 3600, B3600 },
IF{B2400} { 11, 2400, B2400 },
IF{B1800} { 10, 1800, B1800 },
IF{B1200} { 9, 1200, B1200 },
IF{B900} { 9, 900, B900 },
IF{B600} { 8, 600, B600 },
IF{B300} { 7, 300, B300 },
IF{B200} { 6, 200, B200 },
IF{B150} { 5, 150, B150 },
IF{B134} { 4, 134, B134 },
IF{B110} { 3, 110, B110 },
IF{B75} { 2, 75, B75 },
IF{B50} { 1, 50, B50 },
IF{B0} { 0, 0, B0 },
{ -1, -1, -1 }
};
/*
* baud may either be a bits-per-second value or a symbolic
* value as returned by cfget?speed()
*/
struct baud_values *
lookup_baud(baud)
int baud;
{
struct baud_values *p;
for (p = btable; p->idx >= 0; p++)
if (baud == p->bps || baud == p->sym)
return p;
return NULL;
}
/*
* change the baud rate in a mode structure.
* ibaud and obaud are given in bit/second, or at your option as
* termio B... symbols as defined in e.g. suns sys/ttydev.h
* -1 means don't change.
*/
int
SetBaud(m, ibaud, obaud)
struct mode *m;
int ibaud, obaud;
{
struct baud_values *ip, *op;
if ((!(ip = lookup_baud(ibaud)) && ibaud != -1) ||
(!(op = lookup_baud(obaud)) && obaud != -1))
return -1;
#ifdef POSIX
if (ip) cfsetispeed(&m->tio, ip->sym);
if (op) cfsetospeed(&m->tio, op->sym);
#else /* POSIX */
# ifdef TERMIO
if (ip)
{
# ifdef IBSHIFT
m->tio.c_cflag &= ~(CBAUD << IBSHIFT);
m->tio.c_cflag |= (ip->sym & CBAUD) << IBSHIFT;
# else /* IBSHIFT */
if (ibaud != obaud)
return -1;
# endif /* IBSHIFT */
}
if (op)
{
m->tio.c_cflag &= ~CBAUD;
m->tio.c_cflag |= op->sym & CBAUD;
}
# else /* TERMIO */
if (ip) m->m_ttyb.sg_ispeed = ip->idx;
if (op) m->m_ttyb.sg_ospeed = op->idx;
# endif /* TERMIO */
#endif /* POSIX */
return 0;
}
/*
* Write out the mode struct in a readable form
*/
#ifdef DEBUG
void
DebugTTY(m)
struct mode *m;
{
int i;
#ifdef POSIX
debug("struct termios tio:\n");
debug1("c_iflag = %#x\n", (unsigned int)m->tio.c_iflag);
debug1("c_oflag = %#x\n", (unsigned int)m->tio.c_oflag);
debug1("c_cflag = %#x\n", (unsigned int)m->tio.c_cflag);
debug1("c_lflag = %#x\n", (unsigned int)m->tio.c_lflag);
debug1("cfgetospeed() = %d\n", (int)cfgetospeed(&m->tio));
debug1("cfgetispeed() = %d\n", (int)cfgetispeed(&m->tio));
for (i = 0; i < sizeof(m->tio.c_cc)/sizeof(*m->tio.c_cc); i++)
{
debug2("c_cc[%d] = %#x\n", i, m->tio.c_cc[i]);
}
# ifdef HPUX_LTCHARS_HACK
debug1("suspc = %#02x\n", m->m_ltchars.t_suspc);
debug1("dsuspc = %#02x\n", m->m_ltchars.t_dsuspc);
debug1("rprntc = %#02x\n", m->m_ltchars.t_rprntc);
debug1("flushc = %#02x\n", m->m_ltchars.t_flushc);
debug1("werasc = %#02x\n", m->m_ltchars.t_werasc);
debug1("lnextc = %#02x\n", m->m_ltchars.t_lnextc);
# endif /* HPUX_LTCHARS_HACK */
#else /* POSIX */
# ifdef TERMIO
debug("struct termio tio:\n");
debug1("c_iflag = %04o\n", m->tio.c_iflag);
debug1("c_oflag = %04o\n", m->tio.c_oflag);
debug1("c_cflag = %04o\n", m->tio.c_cflag);
debug1("c_lflag = %04o\n", m->tio.c_lflag);
for (i = 0; i < sizeof(m->tio.c_cc)/sizeof(*m->tio.c_cc); i++)
{
debug2("c_cc[%d] = %04o\n", i, m->tio.c_cc[i]);
}
# else /* TERMIO */
debug1("sg_ispeed = %d\n", m->m_ttyb.sg_ispeed);
debug1("sg_ospeed = %d\n", m->m_ttyb.sg_ospeed);
debug1("sg_erase = %#02x\n", m->m_ttyb.sg_erase);
debug1("sg_kill = %#02x\n", m->m_ttyb.sg_kill);
debug1("sg_flags = %#04x\n", (unsigned short)m->m_ttyb.sg_flags);
debug1("intrc = %#02x\n", m->m_tchars.t_intrc);
debug1("quitc = %#02x\n", m->m_tchars.t_quitc);
debug1("startc = %#02x\n", m->m_tchars.t_startc);
debug1("stopc = %#02x\n", m->m_tchars.t_stopc);
debug1("eofc = %#02x\n", m->m_tchars.t_eofc);
debug1("brkc = %#02x\n", m->m_tchars.t_brkc);
debug1("suspc = %#02x\n", m->m_ltchars.t_suspc);
debug1("dsuspc = %#02x\n", m->m_ltchars.t_dsuspc);
debug1("rprntc = %#02x\n", m->m_ltchars.t_rprntc);
debug1("flushc = %#02x\n", m->m_ltchars.t_flushc);
debug1("werasc = %#02x\n", m->m_ltchars.t_werasc);
debug1("lnextc = %#02x\n", m->m_ltchars.t_lnextc);
debug1("ldisc = %d\n", m->m_ldisc);
debug1("lmode = %#x\n", m->m_lmode);
# endif /* TERMIO */
#endif /* POSIX */
}
#endif /* DEBUG */
|
delphinus35/screen
|
src/tty.sh
|
Shell
|
gpl-3.0
| 39,179 |
HOSTNAME=`ifconfig eth1|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1`
if [ not $HOSTNAME ] ; then
HOSTNAME=`ifconfig eth0|grep 'inet addr'|cut -d ":" -f2|cut -d " " -f1`
fi
hostname $HOSTNAME
chef-server-ctl reconfigure
|
deasmi/terraform-provider-libvirt
|
vendor/github.com/mitchellh/packer/examples/alicloud/chef/user_data.sh
|
Shell
|
apache-2.0
| 227 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. $(dirname ${BASH_SOURCE})/../util.sh
WHAT_WAS_RUN="$1"
desc "Resize the RC and watch the service backends change"
run "kubectl --namespace=demos scale $WHAT_WAS_RUN --replicas=1"
run "kubectl --namespace=demos scale $WHAT_WAS_RUN --replicas=2"
run "kubectl --namespace=demos scale $WHAT_WAS_RUN --replicas=5"
desc "Fire up a cloud load-balancer"
run "kubectl --namespace=demos get svc hostnames-svc -o yaml \\
| sed 's/ClusterIP/LoadBalancer/' \\
| kubectl replace -f -"
while true; do
run "kubectl --namespace=demos get svc hostnames -o yaml | grep loadBalancer -A 4"
if kubectl --namespace=demos get svc hostnames \
-o go-template='{{index (index .status.loadBalancer.ingress 0) "ip"}}' \
>/dev/null 2>&1; then
break
fi
done
|
chiradeep/contrib
|
micro-demos/services/split1_scale.sh
|
Shell
|
apache-2.0
| 1,396 |
#
# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 6173575
# @summary Unit tests for appendToBootstrapClassLoaderSearch and
# appendToSystemClasLoaderSearch methods.
#
# @build ClassUnloadTest
# @run shell ClassUnloadTest.sh
if [ "${TESTSRC}" = "" ]
then
echo "TESTSRC not set. Test cannot execute. Failed."
exit 1
fi
. ${TESTSRC}/CommonSetup.sh
# Create Foo and Bar
# Foo has a reference to Bar but we deleted Bar so that
# a NoClassDefFoundError will be thrown when Foo tries to
# resolve the reference to Bar
OTHERDIR="${TESTCLASSES}"/other
mkdir "${OTHERDIR}"
FOO="${OTHERDIR}"/Foo.java
BAR="${OTHERDIR}"/Bar.java
rm -f "${FOO}" "${BAR}"
cat << EOF > "${FOO}"
public class Foo {
public static boolean doSomething() {
try {
Bar b = new Bar();
return true;
} catch (NoClassDefFoundError x) {
return false;
}
}
}
EOF
echo "public class Bar { }" > "${BAR}"
(cd "${OTHERDIR}"; \
$JAVAC ${TESTJAVACOPTS} ${TESTTOOLVMOPTS} Foo.java Bar.java; \
$JAR ${TESTTOOLVMOPTS} cf "${OTHERDIR}"/Bar.jar Bar.class; \
rm -f Bar.class)
# Create the manifest
MANIFEST="${TESTCLASSES}"/agent.mf
rm -f "${MANIFEST}"
echo "Premain-Class: ClassUnloadTest" > "${MANIFEST}"
# Setup test case as an agent
$JAR ${TESTTOOLVMOPTS} -cfm "${TESTCLASSES}"/ClassUnloadTest.jar "${MANIFEST}" \
-C "${TESTCLASSES}" ClassUnloadTest.class
# Finally we run the test
(cd "${TESTCLASSES}"; \
$JAVA ${TESTVMOPTS} -Xverify:none -XX:+TraceClassUnloading \
-javaagent:ClassUnloadTest.jar ClassUnloadTest "${OTHERDIR}" Bar.jar)
|
stain/jdk8u
|
test/java/lang/instrument/appendToClassLoaderSearch/ClassUnloadTest.sh
|
Shell
|
gpl-2.0
| 2,604 |
function _rails_command () {
if [ -e "bin/rails" ]; then
bin/rails $@
elif [ -e "script/rails" ]; then
ruby script/rails $@
elif [ -e "script/server" ]; then
ruby script/$@
else
command rails $@
fi
}
function _rake_command () {
if [ -e "bin/rake" ]; then
bin/rake $@
else
command rake $@
fi
}
alias rails='_rails_command'
compdef _rails_command=rails
alias rake='_rake_command'
compdef _rake_command=rake
alias devlog='tail -f log/development.log'
alias prodlog='tail -f log/production.log'
alias testlog='tail -f log/test.log'
alias -g RED='RAILS_ENV=development'
alias -g REP='RAILS_ENV=production'
alias -g RET='RAILS_ENV=test'
# Rails aliases
alias rc='rails console'
alias rd='rails destroy'
alias rdb='rails dbconsole'
alias rg='rails generate'
alias rgm='rails generate migration'
alias rp='rails plugin'
alias ru='rails runner'
alias rs='rails server'
alias rsd='rails server --debugger'
# Rake aliases
alias rdm='rake db:migrate'
alias rdr='rake db:rollback'
alias rdc='rake db:create'
alias rds='rake db:seed'
alias rdd='rake db:drop'
alias rdtc='rake db:test:clone'
alias rdtp='rake db:test:prepare'
alias rdmtc='rake db:migrate db:test:clone'
alias rlc='rake log:clear'
alias rn='rake notes'
alias rr='rake routes'
# legacy stuff
alias ss='thin --stats "/thin/stats" start'
alias sg='ruby script/generate'
alias sd='ruby script/destroy'
alias sp='ruby script/plugin'
alias sr='ruby script/runner'
alias ssp='ruby script/spec'
alias sc='ruby script/console'
alias sd='ruby script/server --debugger'
function remote_console() {
/usr/bin/env ssh $1 "( cd $2 && ruby script/console production )"
}
|
Techwolf12/dotfiles
|
zsh/oh-my-zsh/plugins/rails/rails.plugin.zsh
|
Shell
|
gpl-2.0
| 1,658 |
#!/bin/sh
#
# Copyright (c) 2006 Junio C Hamano
#
test_description='git grep various.
'
. ./test-lib.sh
cat >hello.c <<EOF
#include <stdio.h>
int main(int argc, const char **argv)
{
printf("Hello world.\n");
return 0;
/* char ?? */
}
EOF
test_expect_success setup '
{
echo foo mmap bar
echo foo_mmap bar
echo foo_mmap bar mmap
echo foo mmap bar_mmap
echo foo_mmap bar mmap baz
} >file &&
{
echo Hello world
echo HeLLo world
echo Hello_world
echo HeLLo_world
} >hello_world &&
{
echo "a+b*c"
echo "a+bc"
echo "abc"
} >ab &&
echo vvv >v &&
echo ww w >w &&
echo x x xx x >x &&
echo y yy >y &&
echo zzz > z &&
mkdir t &&
echo test >t/t &&
echo vvv >t/v &&
mkdir t/a &&
echo vvv >t/a/v &&
{
echo "line without leading space1"
echo " line with leading space1"
echo " line with leading space2"
echo " line with leading space3"
echo "line without leading space2"
} >space &&
git add . &&
test_tick &&
git commit -m initial
'
test_expect_success 'grep should not segfault with a bad input' '
test_must_fail git grep "("
'
for H in HEAD ''
do
case "$H" in
HEAD) HC='HEAD:' L='HEAD' ;;
'') HC= L='in working tree' ;;
esac
test_expect_success "grep -w $L" '
{
echo ${HC}file:1:foo mmap bar
echo ${HC}file:3:foo_mmap bar mmap
echo ${HC}file:4:foo mmap bar_mmap
echo ${HC}file:5:foo_mmap bar mmap baz
} >expected &&
git -c grep.linenumber=false grep -n -w -e mmap $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L" '
{
echo ${HC}file:1:foo mmap bar
echo ${HC}file:3:foo_mmap bar mmap
echo ${HC}file:4:foo mmap bar_mmap
echo ${HC}file:5:foo_mmap bar mmap baz
} >expected &&
git -c grep.linenumber=true grep -w -e mmap $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L" '
{
echo ${HC}file:foo mmap bar
echo ${HC}file:foo_mmap bar mmap
echo ${HC}file:foo mmap bar_mmap
echo ${HC}file:foo_mmap bar mmap baz
} >expected &&
git -c grep.linenumber=true grep --no-line-number -w -e mmap $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L (w)" '
: >expected &&
test_must_fail git grep -n -w -e "^w" >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L (x)" '
{
echo ${HC}x:1:x x xx x
} >expected &&
git grep -n -w -e "x xx* x" $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L (y-1)" '
{
echo ${HC}y:1:y yy
} >expected &&
git grep -n -w -e "^y" $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L (y-2)" '
: >expected &&
if git grep -n -w -e "^y y" $H >actual
then
echo should not have matched
cat actual
false
else
test_cmp expected actual
fi
'
test_expect_success "grep -w $L (z)" '
: >expected &&
if git grep -n -w -e "^z" $H >actual
then
echo should not have matched
cat actual
false
else
test_cmp expected actual
fi
'
test_expect_success "grep $L (t-1)" '
echo "${HC}t/t:1:test" >expected &&
git grep -n -e test $H >actual &&
test_cmp expected actual
'
test_expect_success "grep $L (t-2)" '
echo "${HC}t:1:test" >expected &&
(
cd t &&
git grep -n -e test $H
) >actual &&
test_cmp expected actual
'
test_expect_success "grep $L (t-3)" '
echo "${HC}t/t:1:test" >expected &&
(
cd t &&
git grep --full-name -n -e test $H
) >actual &&
test_cmp expected actual
'
test_expect_success "grep -c $L (no /dev/null)" '
! git grep -c test $H | grep /dev/null
'
test_expect_success "grep --max-depth -1 $L" '
{
echo ${HC}t/a/v:1:vvv
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth -1 -n -e vvv $H >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 $L" '
{
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 -- '*' $L" '
{
echo ${HC}t/a/v:1:vvv
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H -- "*" >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 1 $L" '
{
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 1 -n -e vvv $H >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 -- t $L" '
{
echo ${HC}t/v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H -- t >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 -- . t $L" '
{
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H -- . t >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 -- t . $L" '
{
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H -- t . >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.extendedRegexp=false" '
echo "ab:a+bc" >expected &&
git -c grep.extendedRegexp=false grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.extendedRegexp=true" '
echo "ab:abc" >expected &&
git -c grep.extendedRegexp=true grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patterntype=basic" '
echo "ab:a+bc" >expected &&
git -c grep.patterntype=basic grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patterntype=extended" '
echo "ab:abc" >expected &&
git -c grep.patterntype=extended grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patterntype=fixed" '
echo "ab:a+b*c" >expected &&
git -c grep.patterntype=fixed grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE "grep $L with grep.patterntype=perl" '
echo "ab:a+b*c" >expected &&
git -c grep.patterntype=perl grep "a\x{2b}b\x{2a}c" ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patternType=default and grep.extendedRegexp=true" '
echo "ab:abc" >expected &&
git \
-c grep.patternType=default \
-c grep.extendedRegexp=true \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.extendedRegexp=true and grep.patternType=default" '
echo "ab:abc" >expected &&
git \
-c grep.extendedRegexp=true \
-c grep.patternType=default \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep $L with grep.patternType=extended and grep.extendedRegexp=false' '
echo "ab:abc" >expected &&
git \
-c grep.patternType=extended \
-c grep.extendedRegexp=false \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep $L with grep.patternType=basic and grep.extendedRegexp=true' '
echo "ab:a+bc" >expected &&
git \
-c grep.patternType=basic \
-c grep.extendedRegexp=true \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep $L with grep.extendedRegexp=false and grep.patternType=extended' '
echo "ab:abc" >expected &&
git \
-c grep.extendedRegexp=false \
-c grep.patternType=extended \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep $L with grep.extendedRegexp=true and grep.patternType=basic' '
echo "ab:a+bc" >expected &&
git \
-c grep.extendedRegexp=true \
-c grep.patternType=basic \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
done
cat >expected <<EOF
file
EOF
test_expect_success 'grep -l -C' '
git grep -l -C1 foo >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:5
EOF
test_expect_success 'grep -l -C' '
git grep -c -C1 foo >actual &&
test_cmp expected actual
'
test_expect_success 'grep -L -C' '
git ls-files >expected &&
git grep -L -C1 nonexistent_string >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo mmap bar_mmap
EOF
test_expect_success 'grep -e A --and -e B' '
git grep -e "foo mmap" --and -e bar_mmap >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo_mmap bar mmap
file:foo_mmap bar mmap baz
EOF
test_expect_success 'grep ( -e A --or -e B ) --and -e B' '
git grep \( -e foo_ --or -e baz \) \
--and -e " mmap" >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo mmap bar
EOF
test_expect_success 'grep -e A --and --not -e B' '
git grep -e "foo mmap" --and --not -e bar_mmap >actual &&
test_cmp expected actual
'
test_expect_success 'grep should ignore GREP_OPTIONS' '
GREP_OPTIONS=-v git grep " mmap bar\$" >actual &&
test_cmp expected actual
'
test_expect_success 'grep -f, non-existent file' '
test_must_fail git grep -f patterns
'
cat >expected <<EOF
file:foo mmap bar
file:foo_mmap bar
file:foo_mmap bar mmap
file:foo mmap bar_mmap
file:foo_mmap bar mmap baz
EOF
cat >pattern <<EOF
mmap
EOF
test_expect_success 'grep -f, one pattern' '
git grep -f pattern >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo mmap bar
file:foo_mmap bar
file:foo_mmap bar mmap
file:foo mmap bar_mmap
file:foo_mmap bar mmap baz
t/a/v:vvv
t/v:vvv
v:vvv
EOF
cat >patterns <<EOF
mmap
vvv
EOF
test_expect_success 'grep -f, multiple patterns' '
git grep -f patterns >actual &&
test_cmp expected actual
'
test_expect_success 'grep, multiple patterns' '
git grep "$(cat patterns)" >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo mmap bar
file:foo_mmap bar
file:foo_mmap bar mmap
file:foo mmap bar_mmap
file:foo_mmap bar mmap baz
t/a/v:vvv
t/v:vvv
v:vvv
EOF
cat >patterns <<EOF
mmap
vvv
EOF
test_expect_success 'grep -f, ignore empty lines' '
git grep -f patterns >actual &&
test_cmp expected actual
'
test_expect_success 'grep -f, ignore empty lines, read patterns from stdin' '
git grep -f - <patterns >actual &&
test_cmp expected actual
'
cat >expected <<EOF
y:y yy
--
z:zzz
EOF
test_expect_success 'grep -q, silently report matches' '
>empty &&
git grep -q mmap >actual &&
test_cmp empty actual &&
test_must_fail git grep -q qfwfq >actual &&
test_cmp empty actual
'
test_expect_success 'grep -C1 hunk mark between files' '
git grep -C1 "^[yz]" >actual &&
test_cmp expected actual
'
test_expect_success 'log grep setup' '
echo a >>file &&
test_tick &&
GIT_AUTHOR_NAME="With * Asterisk" \
GIT_AUTHOR_EMAIL="[email protected]" \
git commit -a -m "second" &&
echo a >>file &&
test_tick &&
git commit -a -m "third" &&
echo a >>file &&
test_tick &&
GIT_AUTHOR_NAME="Night Fall" \
GIT_AUTHOR_EMAIL="[email protected]" \
git commit -a -m "fourth"
'
test_expect_success 'log grep (1)' '
git log --author=author --pretty=tformat:%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (2)' '
git log --author=" * " -F --pretty=tformat:%s >actual &&
{
echo second
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (3)' '
git log --author="^A U" --pretty=tformat:%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (4)' '
git log --author="frotz\.com>$" --pretty=tformat:%s >actual &&
{
echo second
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (5)' '
git log --author=Thor -F --pretty=tformat:%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (6)' '
git log --author=-0700 --pretty=tformat:%s >actual &&
>expect &&
test_cmp expect actual
'
test_expect_success 'log grep (7)' '
git log -g --grep-reflog="commit: third" --pretty=tformat:%s >actual &&
echo third >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (8)' '
git log -g --grep-reflog="commit: third" --grep-reflog="commit: second" --pretty=tformat:%s >actual &&
{
echo third && echo second
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (9)' '
git log -g --grep-reflog="commit: third" --author="Thor" --pretty=tformat:%s >actual &&
echo third >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (9)' '
git log -g --grep-reflog="commit: third" --author="non-existant" --pretty=tformat:%s >actual &&
: >expect &&
test_cmp expect actual
'
test_expect_success 'log --grep-reflog can only be used under -g' '
test_must_fail git log --grep-reflog="commit: third"
'
test_expect_success 'log with multiple --grep uses union' '
git log --grep=i --grep=r --format=%s >actual &&
{
echo fourth && echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --all-match with multiple --grep uses intersection' '
git log --all-match --grep=i --grep=r --format=%s >actual &&
{
echo third
} >expect &&
test_cmp expect actual
'
test_expect_success 'log with multiple --author uses union' '
git log --author="Thor" --author="Aster" --format=%s >actual &&
{
echo third && echo second && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --all-match with multiple --author still uses union' '
git log --all-match --author="Thor" --author="Aster" --format=%s >actual &&
{
echo third && echo second && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --grep --author uses intersection' '
# grep matches only third and fourth
# author matches only initial and third
git log --author="A U Thor" --grep=r --format=%s >actual &&
{
echo third
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --grep --grep --author takes union of greps and intersects with author' '
# grep matches initial and second but not third
# author matches only initial and third
git log --author="A U Thor" --grep=s --grep=l --format=%s >actual &&
{
echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log ---all-match -grep --author --author still takes union of authors and intersects with grep' '
# grep matches only initial and third
# author matches all but second
git log --all-match --author="Thor" --author="Night" --grep=i --format=%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --grep --author --author takes union of authors and intersects with grep' '
# grep matches only initial and third
# author matches all but second
git log --author="Thor" --author="Night" --grep=i --format=%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --all-match --grep --grep --author takes intersection' '
# grep matches only third
# author matches only initial and third
git log --all-match --author="A U Thor" --grep=i --grep=r --format=%s >actual &&
{
echo third
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --author does not search in timestamp' '
: >expect &&
git log --author="$GIT_AUTHOR_DATE" >actual &&
test_cmp expect actual
'
test_expect_success 'log --committer does not search in timestamp' '
: >expect &&
git log --committer="$GIT_COMMITTER_DATE" >actual &&
test_cmp expect actual
'
test_expect_success 'grep with CE_VALID file' '
git update-index --assume-unchanged t/t &&
rm t/t &&
test "$(git grep test)" = "t/t:test" &&
git update-index --no-assume-unchanged t/t &&
git checkout t/t
'
cat >expected <<EOF
hello.c=#include <stdio.h>
hello.c: return 0;
EOF
test_expect_success 'grep -p with userdiff' '
git config diff.custom.funcname "^#" &&
echo "hello.c diff=custom" >.gitattributes &&
git grep -p return >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c=int main(int argc, const char **argv)
hello.c: return 0;
EOF
test_expect_success 'grep -p' '
rm -f .gitattributes &&
git grep -p return >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c-#include <stdio.h>
hello.c=int main(int argc, const char **argv)
hello.c-{
hello.c- printf("Hello world.\n");
hello.c: return 0;
EOF
test_expect_success 'grep -p -B5' '
git grep -p -B5 return >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c=int main(int argc, const char **argv)
hello.c-{
hello.c- printf("Hello world.\n");
hello.c: return 0;
hello.c- /* char ?? */
hello.c-}
EOF
test_expect_success 'grep -W' '
git grep -W return >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c= printf("Hello world.\n");
hello.c: return 0;
hello.c- /* char ?? */
EOF
test_expect_success 'grep -W with userdiff' '
test_when_finished "rm -f .gitattributes" &&
git config diff.custom.xfuncname "(printf.*|})$" &&
echo "hello.c diff=custom" >.gitattributes &&
git grep -W return >actual &&
test_cmp expected actual
'
test_expect_success 'grep from a subdirectory to search wider area (1)' '
mkdir -p s &&
(
cd s && git grep "x x x" ..
)
'
test_expect_success 'grep from a subdirectory to search wider area (2)' '
mkdir -p s &&
(
cd s || exit 1
( git grep xxyyzz .. >out ; echo $? >status )
! test -s out &&
test 1 = $(cat status)
)
'
cat >expected <<EOF
hello.c:int main(int argc, const char **argv)
EOF
test_expect_success 'grep -Fi' '
git grep -Fi "CHAR *" >actual &&
test_cmp expected actual
'
test_expect_success 'outside of git repository' '
rm -fr non &&
mkdir -p non/git/sub &&
echo hello >non/git/file1 &&
echo world >non/git/sub/file2 &&
{
echo file1:hello &&
echo sub/file2:world
} >non/expect.full &&
echo file2:world >non/expect.sub &&
(
GIT_CEILING_DIRECTORIES="$(pwd)/non/git" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git grep o &&
git grep --no-index o >../actual.full &&
test_cmp ../expect.full ../actual.full
cd sub &&
test_must_fail git grep o &&
git grep --no-index o >../../actual.sub &&
test_cmp ../../expect.sub ../../actual.sub
) &&
echo ".*o*" >non/git/.gitignore &&
(
GIT_CEILING_DIRECTORIES="$(pwd)/non/git" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git grep o &&
git grep --no-index --exclude-standard o >../actual.full &&
test_cmp ../expect.full ../actual.full &&
{
echo ".gitignore:.*o*"
cat ../expect.full
} >../expect.with.ignored &&
git grep --no-index --no-exclude o >../actual.full &&
test_cmp ../expect.with.ignored ../actual.full
)
'
test_expect_success 'inside git repository but with --no-index' '
rm -fr is &&
mkdir -p is/git/sub &&
echo hello >is/git/file1 &&
echo world >is/git/sub/file2 &&
echo ".*o*" >is/git/.gitignore &&
{
echo file1:hello &&
echo sub/file2:world
} >is/expect.unignored &&
{
echo ".gitignore:.*o*" &&
cat is/expect.unignored
} >is/expect.full &&
: >is/expect.empty &&
echo file2:world >is/expect.sub &&
(
cd is/git &&
git init &&
test_must_fail git grep o >../actual.full &&
test_cmp ../expect.empty ../actual.full &&
git grep --untracked o >../actual.unignored &&
test_cmp ../expect.unignored ../actual.unignored &&
git grep --no-index o >../actual.full &&
test_cmp ../expect.full ../actual.full &&
git grep --no-index --exclude-standard o >../actual.unignored &&
test_cmp ../expect.unignored ../actual.unignored &&
cd sub &&
test_must_fail git grep o >../../actual.sub &&
test_cmp ../../expect.empty ../../actual.sub &&
git grep --no-index o >../../actual.sub &&
test_cmp ../../expect.sub ../../actual.sub &&
git grep --untracked o >../../actual.sub &&
test_cmp ../../expect.sub ../../actual.sub
)
'
test_expect_success 'setup double-dash tests' '
cat >double-dash <<EOF &&
--
->
other
EOF
git add double-dash
'
cat >expected <<EOF
double-dash:->
EOF
test_expect_success 'grep -- pattern' '
git grep -- "->" >actual &&
test_cmp expected actual
'
test_expect_success 'grep -- pattern -- pathspec' '
git grep -- "->" -- double-dash >actual &&
test_cmp expected actual
'
test_expect_success 'grep -e pattern -- path' '
git grep -e "->" -- double-dash >actual &&
test_cmp expected actual
'
cat >expected <<EOF
double-dash:--
EOF
test_expect_success 'grep -e -- -- path' '
git grep -e -- -- double-dash >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c:int main(int argc, const char **argv)
hello.c: printf("Hello world.\n");
EOF
test_expect_success LIBPCRE 'grep --perl-regexp pattern' '
git grep --perl-regexp "\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE 'grep -P pattern' '
git grep -P "\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp expected actual
'
test_expect_success 'grep pattern with grep.extendedRegexp=true' '
>empty &&
test_must_fail git -c grep.extendedregexp=true \
grep "\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp empty actual
'
test_expect_success LIBPCRE 'grep -P pattern with grep.extendedRegexp=true' '
git -c grep.extendedregexp=true \
grep -P "\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE 'grep -P -v pattern' '
{
echo "ab:a+b*c"
echo "ab:a+bc"
} >expected &&
git grep -P -v "abc" ab >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE 'grep -P -i pattern' '
cat >expected <<-EOF &&
hello.c: printf("Hello world.\n");
EOF
git grep -P -i "PRINTF\([^\d]+\)" hello.c >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE 'grep -P -w pattern' '
{
echo "hello_world:Hello world"
echo "hello_world:HeLLo world"
} >expected &&
git grep -P -w "He((?i)ll)o" hello_world >actual &&
test_cmp expected actual
'
test_expect_success 'grep -G invalidpattern properly dies ' '
test_must_fail git grep -G "a["
'
test_expect_success 'grep invalidpattern properly dies with grep.patternType=basic' '
test_must_fail git -c grep.patterntype=basic grep "a["
'
test_expect_success 'grep -E invalidpattern properly dies ' '
test_must_fail git grep -E "a["
'
test_expect_success 'grep invalidpattern properly dies with grep.patternType=extended' '
test_must_fail git -c grep.patterntype=extended grep "a["
'
test_expect_success LIBPCRE 'grep -P invalidpattern properly dies ' '
test_must_fail git grep -P "a["
'
test_expect_success LIBPCRE 'grep invalidpattern properly dies with grep.patternType=perl' '
test_must_fail git -c grep.patterntype=perl grep "a["
'
test_expect_success 'grep -G -E -F pattern' '
echo "ab:a+b*c" >expected &&
git grep -G -E -F "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep pattern with grep.patternType=basic, =extended, =fixed' '
echo "ab:a+b*c" >expected &&
git \
-c grep.patterntype=basic \
-c grep.patterntype=extended \
-c grep.patterntype=fixed \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -E -F -G pattern' '
echo "ab:a+bc" >expected &&
git grep -E -F -G "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep pattern with grep.patternType=extended, =fixed, =basic' '
echo "ab:a+bc" >expected &&
git \
-c grep.patterntype=extended \
-c grep.patterntype=fixed \
-c grep.patterntype=basic \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -F -G -E pattern' '
echo "ab:abc" >expected &&
git grep -F -G -E "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep pattern with grep.patternType=fixed, =basic, =extended' '
echo "ab:abc" >expected &&
git \
-c grep.patterntype=fixed \
-c grep.patterntype=basic \
-c grep.patterntype=extended \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -G -F -P -E pattern' '
>empty &&
test_must_fail git grep -G -F -P -E "a\x{2b}b\x{2a}c" ab >actual &&
test_cmp empty actual
'
test_expect_success 'grep pattern with grep.patternType=fixed, =basic, =perl, =extended' '
>empty &&
test_must_fail git \
-c grep.patterntype=fixed \
-c grep.patterntype=basic \
-c grep.patterntype=perl \
-c grep.patterntype=extended \
grep "a\x{2b}b\x{2a}c" ab >actual &&
test_cmp empty actual
'
test_expect_success LIBPCRE 'grep -G -F -E -P pattern' '
echo "ab:a+b*c" >expected &&
git grep -G -F -E -P "a\x{2b}b\x{2a}c" ab >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE 'grep pattern with grep.patternType=fixed, =basic, =extended, =perl' '
echo "ab:a+b*c" >expected &&
git \
-c grep.patterntype=fixed \
-c grep.patterntype=basic \
-c grep.patterntype=extended \
-c grep.patterntype=perl \
grep "a\x{2b}b\x{2a}c" ab >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE 'grep -P pattern with grep.patternType=fixed' '
echo "ab:a+b*c" >expected &&
git \
-c grep.patterntype=fixed \
grep -P "a\x{2b}b\x{2a}c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -F pattern with grep.patternType=basic' '
echo "ab:a+b*c" >expected &&
git \
-c grep.patterntype=basic \
grep -F "*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -G pattern with grep.patternType=fixed' '
{
echo "ab:a+b*c"
echo "ab:a+bc"
} >expected &&
git \
-c grep.patterntype=fixed \
grep -G "a+b" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -E pattern with grep.patternType=fixed' '
{
echo "ab:a+b*c"
echo "ab:a+bc"
echo "ab:abc"
} >expected &&
git \
-c grep.patterntype=fixed \
grep -E "a+" ab >actual &&
test_cmp expected actual
'
# Set git config key $1 to value $2 for the duration of the current
# test; test_when_finished queues a matching --unset so the setting
# does not leak into subsequent tests.
test_config() {
	git config "$1" "$2" &&
	test_when_finished "git config --unset $1"
}
cat >expected <<EOF
hello.c<RED>:<RESET>int main(int argc, const char **argv)
hello.c<RED>-<RESET>{
<RED>--<RESET>
hello.c<RED>:<RESET> /* char ?? */
hello.c<RED>-<RESET>}
<RED>--<RESET>
hello_world<RED>:<RESET>Hello_world
hello_world<RED>-<RESET>HeLLo_world
EOF
test_expect_success 'grep --color, separator' '
test_config color.grep.context normal &&
test_config color.grep.filename normal &&
test_config color.grep.function normal &&
test_config color.grep.linenumber normal &&
test_config color.grep.match normal &&
test_config color.grep.selected normal &&
test_config color.grep.separator red &&
git grep --color=always -A1 -e char -e lo_w hello.c hello_world |
test_decode_color >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c:int main(int argc, const char **argv)
hello.c: /* char ?? */
hello_world:Hello_world
EOF
test_expect_success 'grep --break' '
git grep --break -e char -e lo_w hello.c hello_world >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c:int main(int argc, const char **argv)
hello.c-{
--
hello.c: /* char ?? */
hello.c-}
hello_world:Hello_world
hello_world-HeLLo_world
EOF
test_expect_success 'grep --break with context' '
git grep --break -A1 -e char -e lo_w hello.c hello_world >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c
int main(int argc, const char **argv)
/* char ?? */
hello_world
Hello_world
EOF
test_expect_success 'grep --heading' '
git grep --heading -e char -e lo_w hello.c hello_world >actual &&
test_cmp expected actual
'
cat >expected <<EOF
<BOLD;GREEN>hello.c<RESET>
2:int main(int argc, const <BLACK;BYELLOW>char<RESET> **argv)
6: /* <BLACK;BYELLOW>char<RESET> ?? */
<BOLD;GREEN>hello_world<RESET>
3:Hel<BLACK;BYELLOW>lo_w<RESET>orld
EOF
test_expect_success 'mimic ack-grep --group' '
test_config color.grep.context normal &&
test_config color.grep.filename "bold green" &&
test_config color.grep.function normal &&
test_config color.grep.linenumber normal &&
test_config color.grep.match "black yellow" &&
test_config color.grep.selected normal &&
test_config color.grep.separator normal &&
git grep --break --heading -n --color \
-e char -e lo_w hello.c hello_world |
test_decode_color >actual &&
test_cmp expected actual
'
cat >expected <<EOF
space: line with leading space1
space: line with leading space2
space: line with leading space3
EOF
test_expect_success LIBPCRE 'grep -E "^ "' '
git grep -E "^ " space >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE 'grep -P "^ "' '
git grep -P "^ " space >actual &&
test_cmp expected actual
'
test_done
|
IsCoolEntertainment/debpkg_git
|
t/t7810-grep.sh
|
Shell
|
gpl-2.0
| 28,036 |
#!/bin/sh
# Sync the libcharset sources from an upstream checkout (given as $1)
# into the current directory, apply the glib-local patch, and record
# the upstream version in ../../aclibcharset.m4.
if test $# = 1 ; then
  ORIGINAL=$1
else
  echo "Usage: update.sh /path/to/libcharset" 1>&2
  exit 1
fi
# Sanity-check that $1 really is a libcharset checkout.
if test -f "$ORIGINAL/lib/localcharset.c" ; then : ; else
  echo "Usage: update.sh /path/to/libcharset" 1>&2
  exit 1
fi
# Extract the upstream version from configure.ac (line "VERSION=x.y").
VERSION=$(grep VERSION= "$ORIGINAL/configure.ac" | sed s/VERSION=//)
# Copy the C sources and build fragments; abort on any failed copy so a
# partially-updated tree is noticed immediately.
for i in localcharset.c ref-add.sin ref-del.sin config.charset ; do
  cp "$ORIGINAL/lib/$i" . || exit 1
done
# The public headers are generated from *.in templates upstream.
for i in libcharset.h localcharset.h ; do
  cp "$ORIGINAL/include/$i.in" "./$i" || exit 1
done
for i in codeset.m4 glibc21.m4 ; do
  cp "$ORIGINAL/m4/$i" . || exit 1
done
# Apply glib-specific modifications on top of the pristine sources.
patch -p0 < libcharset-glib.patch
echo "dnl From libcharset $VERSION" > ../../aclibcharset.m4
|
xbmc/atv2
|
xbmc/lib/libmms/glib-2.20.4/glib/libcharset/update.sh
|
Shell
|
gpl-2.0
| 646 |
#!/bin/bash
#
# This installs OpenVPN plus certificates.
# Resulting client config files in CFG_VPN_CONF from config
#
#set -x
cd $(dirname $0)
. ../etc/common.sh
#
# Print this host's external IPv4 address: ask a web service first,
# falling back to the address of the interface holding the default route.
#
function get_external_ipv4 () {
    # The service answers with a sentence like
    # "Your IP address is 12.13.14.15" -- the address is field 5.
    external_ip=$(curl -s http://whatismijnip.nl)
    if [ $? -eq 0 ]; then
        external_ip=$(echo "$external_ip" | cut -d " " -f 5)
    else
        unset external_ip
    fi
    # BUGFIX: the original tested [ -z $external_ip ] unquoted, which
    # errors out ("too many arguments") whenever the value contains
    # whitespace; quoting makes the emptiness test reliable.
    if [ -z "$external_ip" ]; then
        # Interface that carries the default route.
        iface=$(ip route | awk '/^default / { for(i=0;i<NF;i++) { if ($i == "dev") { print $(i+1); next; }}}')
        # The unquoted $(...) intentionally collapses any multi-line sed
        # output onto a single space-separated line, as before.
        echo $(ifconfig "$iface" | sed -rn 's/.*r:([^ ]+) .*/\1/p')
    else
        echo "$external_ip"
    fi
}
#
# print the certificate given in $2
# wrap in tags provided in $1
# (note that END is a perfectly valid sequence in a key
# so the BEGIN and END patterns must be anchored and specific!)
#
function print_cert () {
    echo "<$1>"
    # Emit only the PEM payload between the BEGIN/END marker lines.
    # (sed reads the file directly; the original's "cat | sed" was a
    # useless use of cat and left $2 unquoted.)
    sed -n '/^-----BEGIN .*-----$/,/^-----END .*-----$/p' "$2"
    echo "</$1>"
}
#
# print the dotted-quad subnet mask for the CIDR prefix length in $1
# e.g. 24 -> 255.255.255.0
#
function print_mask () {
    # Pure-bash replacement for the original python one-liner: the
    # "print" statement there is python2-only and breaks wherever
    # /usr/bin/python is python3. Same output, no interpreter needed.
    local bits=$1
    local mask=$(( (0xffffffff << (32 - bits)) & 0xffffffff ))
    echo "$(( (mask >> 24) & 255 )).$(( (mask >> 16) & 255 )).$(( (mask >> 8) & 255 )).$(( mask & 255 ))"
}
#
# print the IPv4 network in $1 with its last octet replaced by $2
# e.g. "10.0.0.0 5" -> "10.0.0.5"
#
function print_ipv4_net () {
    # Quote every expansion: the original's unquoted $1 was subject to
    # word-splitting and globbing.
    echo "$(echo "$1" | cut -d. -f1-3).$2"
}
#
# install required packages
#
apt-get -y install openvpn easy-rsa
#
# certificate / CA stuff
#
cd /usr/share/easy-rsa/
sed -ri 's/(^export KEY_CITY=")(.*)"/\1San Jose"/' ./vars
sed -ri 's/(^export KEY_OU=")(.*)"/\1DevNet Sandbox"/' ./vars
sed -ri 's/(^export KEY_ORG=")(.*)"/\1Cisco"/' ./vars
sed -ri 's/(^export KEY_EMAIL=")(.*)"/\1root@'${CFG_HOSTNAME}.${CFG_DOMAIN}'"/' ./vars
sed -ri 's/(^export KEY_EXPIRE=)(.*)/\1365/' ./vars
sed -ri 's/(^export CA_EXPIRE=)(.*)/\1365/' ./vars
. ./vars
./clean-all
./pkitool --initca
./pkitool --server ${CFG_HOSTNAME}.${CFG_DOMAIN}
./pkitool virl-sandbox-client
./build-dh
cd keys/
cp ca.crt ${CFG_HOSTNAME}.${CFG_DOMAIN}.* dh*.pem /etc/openvpn/
#
# create the server config file
# (duplicate-cn allows the cert to be used multiple times)
#
cat >/etc/openvpn/server.conf <<EOF
port $CFG_VPN_PORT
proto $CFG_VPN_PROT
dev ${CFG_VPN_DEV}0
duplicate-cn
ca /etc/openvpn/ca.crt
dh /etc/openvpn/dh2048.pem
key /etc/openvpn/${CFG_HOSTNAME}.${CFG_DOMAIN}.key
cert /etc/openvpn/${CFG_HOSTNAME}.${CFG_DOMAIN}.crt
max-clients 20
keepalive 10 60
persist-tun
verb 1
mute 3
EOF
## push "dhcp-option DNS 8.8.8.8"
## push "dhcp-option DOMAIN virl.lab"
#
# the networks here must match IP networks defined in /etc/virl.ini
# also different start procedure is required for tap vs tun
#
if [[ $CFG_VPN_DEV =~ tun ]]; then
vpn_gateway="" # no gateway needed for L3
vpn_network=$(echo $CFG_VPN_L3_NET | cut -d/ -f1)
vpn_netcidr=$(echo $CFG_VPN_L3_NET | cut -d/ -f2)
vpn_netmask=$(print_mask $vpn_netcidr)
echo "server $vpn_network $vpn_netmask" >>/etc/openvpn/server.conf
ufw allow in on ${CFG_VPN_DEV}0
else
vpn_gateway=$(crudini --get /etc/virl.ini DEFAULT l2_network_gateway | cut -d/ -f1)
vpn_network=$(crudini --get /etc/virl.ini DEFAULT l2_network | cut -d/ -f1)
vpn_netcidr=$(crudini --get /etc/virl.ini DEFAULT l2_network | cut -d/ -f2)
vpn_netmask=$(print_mask $vpn_netcidr)
echo "server-bridge $vpn_network $vpn_netmask" \
"$(print_ipv4_net $vpn_network $CFG_VPN_L2_LO)" \
"$(print_ipv4_net $vpn_network $CFG_VPN_L2_HI)" >>/etc/openvpn/server.conf
echo "up /etc/openvpn/bridge-up.sh" >>/etc/openvpn/server.conf
cat >/etc/openvpn/bridge-up.sh <<EOF
#!/bin/bash
#
# If using a bridged interface, some more stuff is required.
# The bridge will be attached to FLAT == l2_port interface.
# l2_port is defined in /etc/virl.ini
#
# First, get the L3 interface attached to FLAT.
# It might take a while until Neutron brings it up so we wait for it.
#
# \$1 = tap_dev
# \$2 = tap_mtu
# \$3 = link_mtu
# \$4 = ifconfig_local_ip
# \$5 = ifconfig_netmask
# \$6 = [ init | restart ]
#
l2_port=\$(crudini --get /etc/virl.ini DEFAULT l2_port)
flat=""
while [ "\$flat" = "" ]; do
flat=\$(brctl show | sed -rne '/'\$l2_port'/s/^(brq[a-z0-9\-]{11}).*'\$l2_port'$/\1/p')
if [ "\$flat" = "" ]; then
echo "OpenVPN: waiting for FLAT bridge to come up..."
sleep 5
fi
done
# add the VPN Tap device to the Bridge
brctl addif \$flat \$1
# bring the VPN Tap device up
ifconfig \$1 up mtu \$2
# make sure that the bridge interfaces are not subject
# to iptables filtering
sysctl -w net.bridge.bridge-nf-call-iptables=0
sysctl -w net.bridge.bridge-nf-call-ip6tables=0
# add the bridge to iptables
ufw allow in on \$flat
exit
EOF
# make it executable
chmod u+x /etc/openvpn/bridge-up.sh
# Change priority for OpenVPN start (default=16) but
# at that time Neutron has not been started!
# For the tap interface to come up successfully the
# L3 Neutron Router Interfaces have to be configured first!
# So we move the OpenVPN start to the end of the line.
update-rc.d -f openvpn remove
update-rc.d openvpn start 99 2 3 4 5 . stop 80 0 1 6 .
fi
#
# we need to push a route to the clients with a super net to
# all VIRL internal networks.
#
vpn_network=$(echo $CFG_VPN_ROUTE | cut -d/ -f1)
vpn_netcidr=$(echo $CFG_VPN_ROUTE | cut -d/ -f2)
vpn_netmask=$(print_mask $vpn_netcidr)
echo "push \"route $vpn_network $vpn_netmask $vpn_gateway\"" >>/etc/openvpn/server.conf
#
# client config file
#
cat >$CFG_VPN_CONF <<EOF
# VIRL OpenVPN Client Configuration
client
dev $CFG_VPN_DEV
port $CFG_VPN_PORT
proto $CFG_VPN_PROT
persist-tun
verb 2
mute 3
nobind
reneg-sec 604800
# sndbuf 100000
# rcvbuf 100000
# Verify server certificate by checking
# that the certicate has the nsCertType
# field set to "server". This is an
# important precaution to protect against
# a potential attack discussed here:
# http://openvpn.net/howto.html#mitm
#
# To use this feature, you will need to generate
# your server certificates with the nsCertType
# field set to "server". The build-key-server
# script in the easy-rsa folder will do this.
ns-cert-type server
# If you are connecting through an
# HTTP proxy to reach the actual OpenVPN
# server, put the proxy server/IP and
# port number here. See the man page
# if your proxy server requires
# authentication.
;http-proxy-retry # retry on connection failures
;http-proxy [proxy server] [proxy port #]
EOF
# remaining config stuff
echo -n "remote " >>$CFG_VPN_CONF
get_external_ipv4 >>$CFG_VPN_CONF
print_cert "ca" ca.crt >>$CFG_VPN_CONF
print_cert "cert" virl-sandbox-client.crt >>$CFG_VPN_CONF
print_cert "key" virl-sandbox-client.key >>$CFG_VPN_CONF
# start OpenVPN service
# (if the server reboots after this step then this is not needed)
#service openvpn restart
exit $STATE_REBOOT
|
rschmied/sb-bootstrap
|
stages/04-openvpn.sh
|
Shell
|
isc
| 6,891 |
#!/bin/sh
# Three-monitor layout: two 1920x1080 HDMI displays side by side
# (HDMI1 at 0x0, HDMI2 at 1920x0) with the 1440x900 laptop panel (eDP1)
# placed below at 1200x1080; all other outputs are disabled.
# NOTE(review): offsets encode a specific physical arrangement -- adjust
# the --pos values if the desk setup changes.
xrandr --output VIRTUAL1 --off --output eDP1 --mode 1440x900 --pos 1200x1080 --rotate normal --output DP1 --off --output HDMI2 --mode 1920x1080 --pos 1920x0 --rotate normal --output HDMI1 --mode 1920x1080 --pos 0x0 --rotate normal --output DP2 --off
|
markwkm/dotfiles
|
misc/mbp11,1/xrandr/home-2hdmi.sh
|
Shell
|
mit
| 260 |
#!/bin/bash
# Interactive wrapper: prompt for a shader file and pass it to the
# LoveShaderConverter binary sitting next to this script.
# BUGFIX: the original shebang was "#!bin/bash" (missing leading slash),
# so the script failed when executed directly.
echo Welcome to Love Shader Converter,
echo this file needs to be executed from the same directory as the EXE.
echo
echo Please enter the shader file:
# -r keeps backslashes in the path literal; quoting preserves paths
# that contain spaces.
read -r file_name
./LoveShaderConverter "$file_name"
|
tsteinholz/LoveShaderConverter
|
scripts/LoveShaderConverter.sh
|
Shell
|
mit
| 210 |
#!/bin/bash
#
# Example of commands to process multi-parametric data of the spinal cord.
#
# Please note that this batch script has a lot of redundancy and should not
# be used as a pipeline for regular processing. For example, there is no need
# to process both t1 and t2 to extract CSA values.
#
# For information about acquisition parameters, see: https://osf.io/wkdym/
# N.B. The parameters are set for these type of data. With your data, parameters
# might be slightly different.
#
# Usage:
#
# [option] $SCT_DIR/batch_processing.sh
#
# Prevent (re-)downloading sct_example_data:
# SCT_BP_DOWNLOAD=0 $SCT_DIR/batch_processing.sh
#
# Specify quality control (QC) folder (Default is ~/qc_batch_processing):
# SCT_BP_QC_FOLDER=/user/toto/my_qc_folder $SCT_DIR/batch_processing.sh
# Abort on error
set -ve
# For full verbose, uncomment the next line
# set -x
# Pick the command used at the end to open the QC report in a browser.
if uname -a | grep -i darwin > /dev/null 2>&1; then
  # OSX
  open_command="open"
else
  # Linux -- also a sensible default for unrecognized platforms; the
  # original left open_command unset in that case.
  open_command="xdg-open"
fi
# Download the example dataset unless the user opted out (SCT_BP_DOWNLOAD=0)
if [[ -z "$SCT_BP_DOWNLOAD" ]]; then
  SCT_BP_DOWNLOAD=1
fi
# Quality Control (QC) output folder
if [[ -z "$SCT_BP_QC_FOLDER" ]]; then
  SCT_BP_QC_FOLDER="$(pwd)/qc_example_data"
fi
# Start from a clean QC folder unless told to keep it
# (replaces the deprecated, ambiguous "[ ... -a ... ]" form)
if [[ -z "$SCT_BP_NO_REMOVE_QC" && -d "$SCT_BP_QC_FOLDER" ]]; then
  echo "Removing $SCT_BP_QC_FOLDER folder."
  rm -rf "$SCT_BP_QC_FOLDER"
fi
# Record the start time so total runtime can be reported at the end
start=$(date +%s)
# download example data
if [[ "$SCT_BP_DOWNLOAD" == "1" ]]; then
  sct_download_data -d sct_example_data
fi
cd sct_example_data
# t2
# ===========================================================================================
cd t2
# Segment spinal cord
sct_deepseg_sc -i t2.nii.gz -c t2 -qc "$SCT_BP_QC_FOLDER"
# Tips: If you are not satisfied with the results you can try with another algorithm:
# sct_propseg -i t2.nii.gz -c t2 -qc "$SCT_BP_QC_FOLDER"
# Vertebral labeling
# Tips: for manual initialization of labeling by clicking at disc C2-C3, use flag -initc2
sct_label_vertebrae -i t2.nii.gz -s t2_seg.nii.gz -c t2 -qc "$SCT_BP_QC_FOLDER"
# Create labels at in the cord at C2 and C5 mid-vertebral levels
sct_label_utils -i t2_seg_labeled.nii.gz -vert-body 2,5 -o labels_vert.nii.gz
# Tips: you can also create labels manually using:
# sct_label_utils -i t2.nii.gz -create-viewer 2,5 -o labels_vert.nii.gz
# Register to template
sct_register_to_template -i t2.nii.gz -s t2_seg.nii.gz -l labels_vert.nii.gz -c t2 -qc "$SCT_BP_QC_FOLDER"
# Tips: If you are not satisfied with the results, you can tweak registration parameters.
# For example here, we would like to take into account the rotation of the cord, as well as
# adding a 3rd registration step that uses the image intensity (not only cord segmentations).
# so we could do something like this:
# sct_register_multimodal -i $SCT_DIR/data/PAM50/template/PAM50_t2s.nii.gz -iseg $SCT_DIR/data/PAM50/template/PAM50_cord.nii.gz -d t2s.nii.gz -dseg t2s_seg.nii.gz -param step=1,type=seg,algo=slicereg,smooth=3:step=2,type=seg,algo=bsplinesyn,slicewise=1,iter=3 -initwarp ../t2/warp_template2anat.nii.gz
# Warp template without the white matter atlas (we don't need it at this point)
sct_warp_template -d t2.nii.gz -w warp_template2anat.nii.gz -a 0
# Compute cross-sectional area (and other morphometry measures) for each slice
sct_process_segmentation -i t2_seg.nii.gz
# Compute cross-sectional area and average between C2 and C3 levels
sct_process_segmentation -i t2_seg.nii.gz -vert 2:3 -o csa_c2c3.csv
# Compute cross-sectionnal area based on distance from pontomedullary junction (PMJ)
# Detect PMJ
sct_detect_pmj -i t2.nii.gz -c t2 -qc "$SCT_BP_QC_FOLDER"
# Compute cross-section area at 60 mm from PMJ averaged on a 30 mm extent
sct_process_segmentation -i t2_seg.nii.gz -pmj t2_pmj.nii.gz -pmj-distance 60 -pmj-extent 30 -qc "$SCT_BP_QC_FOLDER" -qc-image t2.nii.gz -o csa_pmj.csv
# Go back to root folder
cd ..
# t2s (stands for t2-star)
# ===========================================================================================
cd t2s
# Spinal cord segmentation
sct_deepseg_sc -i t2s.nii.gz -c t2s -qc "$SCT_BP_QC_FOLDER"
# Segment gray matter
sct_deepseg_gm -i t2s.nii.gz -qc "$SCT_BP_QC_FOLDER"
# Register template->t2s (using warping field generated from template<->t2 registration)
sct_register_multimodal -i $SCT_DIR/data/PAM50/template/PAM50_t2s.nii.gz -iseg $SCT_DIR/data/PAM50/template/PAM50_cord.nii.gz -d t2s.nii.gz -dseg t2s_seg.nii.gz -param step=1,type=seg,algo=centermass:step=2,type=seg,algo=bsplinesyn,slicewise=1,iter=3:step=3,type=im,algo=syn,slicewise=1,iter=1,metric=CC -initwarp ../t2/warp_template2anat.nii.gz -initwarpinv ../t2/warp_anat2template.nii.gz
# rename warping fields for clarity
mv warp_PAM50_t2s2t2s.nii.gz warp_template2t2s.nii.gz
mv warp_t2s2PAM50_t2s.nii.gz warp_t2s2template.nii.gz
# Warp template
sct_warp_template -d t2s.nii.gz -w warp_template2t2s.nii.gz
# Subtract GM segmentation from cord segmentation to obtain WM segmentation
sct_maths -i t2s_seg.nii.gz -sub t2s_gmseg.nii.gz -o t2s_wmseg.nii.gz
# Compute cross-sectional area of the gray and white matter between C2 and C5
sct_process_segmentation -i t2s_wmseg.nii.gz -vert 2:5 -perlevel 1 -o csa_wm.csv
sct_process_segmentation -i t2s_gmseg.nii.gz -vert 2:5 -perlevel 1 -o csa_gm.csv
# OPTIONAL: Update template registration using information from gray matter segmentation
# # <<<
# # Register WM/GM template to WM/GM seg
# sct_register_multimodal -i $SCT_DIR/data/PAM50/template/PAM50_wm.nii.gz -d t2s_wmseg.nii.gz -dseg t2s_seg.nii.gz -param step=1,type=im,algo=syn,slicewise=1,iter=5 -initwarp warp_template2t2s.nii.gz -initwarpinv warp_t2s2template.nii.gz -qc "$SCT_BP_QC_FOLDER"
# # Rename warping fields for clarity
# mv warp_PAM50_wm2t2s_wmseg.nii.gz warp_template2t2s.nii.gz
# mv warp_t2s_wmseg2PAM50_wm.nii.gz warp_t2s2template.nii.gz
# # Warp template (this time corrected for internal structure)
# sct_warp_template -d t2s.nii.gz -w warp_template2t2s.nii.gz
# # >>>
cd ..
# t1
# ===========================================================================================
cd t1
# Segment spinal cord
sct_deepseg_sc -i t1.nii.gz -c t1 -qc "$SCT_BP_QC_FOLDER"
# Smooth spinal cord along superior-inferior axis
sct_smooth_spinalcord -i t1.nii.gz -s t1_seg.nii.gz
# Flatten cord in the right-left direction (to make nice figure)
sct_flatten_sagittal -i t1.nii.gz -s t1_seg.nii.gz
# Go back to root folder
cd ..
# mt
# ===========================================================================================
cd mt
# Get centerline from mt1 data
sct_get_centerline -i mt1.nii.gz -c t2
# sct_get_centerline -i mt1.nii.gz -c t2 -qc "$SCT_BP_QC_FOLDER"
# Create mask
sct_create_mask -i mt1.nii.gz -p centerline,mt1_centerline.nii.gz -size 45mm
# Crop data for faster processing
sct_crop_image -i mt1.nii.gz -m mask_mt1.nii.gz -o mt1_crop.nii.gz
# Segment spinal cord
sct_deepseg_sc -i mt1_crop.nii.gz -c t2 -qc "$SCT_BP_QC_FOLDER"
# Register mt0->mt1
# Tips: here we only use rigid transformation because both images have very similar sequence parameters. We don't want to use SyN/BSplineSyN to avoid introducing spurious deformations.
# Tips: here we input -dseg because it is needed by the QC report
sct_register_multimodal -i mt0.nii.gz -d mt1_crop.nii.gz -dseg mt1_crop_seg.nii.gz -param step=1,type=im,algo=rigid,slicewise=1,metric=CC -x spline -qc "$SCT_BP_QC_FOLDER"
# Register template->mt1
# Tips: here we only use the segmentations due to poor SC/CSF contrast at the bottom slice.
# Tips: First step: slicereg based on images, with large smoothing to capture potential motion between anat and mt, then at second step: bpslinesyn in order to adapt the shape of the cord to the mt modality (in case there are distortions between anat and mt).
sct_register_multimodal -i $SCT_DIR/data/PAM50/template/PAM50_t2.nii.gz -iseg $SCT_DIR/data/PAM50/template/PAM50_cord.nii.gz -d mt1_crop.nii.gz -dseg mt1_crop_seg.nii.gz -param step=1,type=seg,algo=slicereg,smooth=3:step=2,type=seg,algo=bsplinesyn,slicewise=1,iter=3 -initwarp ../t2/warp_template2anat.nii.gz -initwarpinv ../t2/warp_anat2template.nii.gz
# Rename warping fields for clarity
mv warp_PAM50_t22mt1_crop.nii.gz warp_template2mt.nii.gz
mv warp_mt1_crop2PAM50_t2.nii.gz warp_mt2template.nii.gz
# Warp template
sct_warp_template -d mt1_crop.nii.gz -w warp_template2mt.nii.gz -qc "$SCT_BP_QC_FOLDER"
# Compute mtr
sct_compute_mtr -mt0 mt0_reg.nii.gz -mt1 mt1_crop.nii.gz
# Register t1w->mt1
# Tips: We do not need to crop the t1w image before registration because step=0 of the registration is to put the source image in the space of the destination image (equivalent to cropping the t1w)
sct_register_multimodal -i t1w.nii.gz -d mt1_crop.nii.gz -dseg mt1_crop_seg.nii.gz -param step=1,type=im,algo=rigid,slicewise=1,metric=CC -x spline -qc "$SCT_BP_QC_FOLDER"
# Compute MTsat
# Tips: Check your TR and Flip Angle from the Dicom data
sct_compute_mtsat -mt mt1_crop.nii.gz -pd mt0_reg.nii.gz -t1 t1w_reg.nii.gz -trmt 30 -trpd 30 -trt1 15 -famt 9 -fapd 9 -fat1 15
# Extract MTR, T1 and MTsat within the white matter between C2 and C5.
# Tips: Here we use "-discard-neg-val 1" to discard inconsistent negative values in MTR calculation which are caused by noise.
sct_extract_metric -i mtr.nii.gz -method map -o mtr_in_wm.csv -l 51 -vert 2:5
sct_extract_metric -i mtsat.nii.gz -method map -o mtsat_in_wm.csv -l 51 -vert 2:5
sct_extract_metric -i t1map.nii.gz -method map -o t1_in_wm.csv -l 51 -vert 2:5
# Bring MTR to template space (e.g. for group mapping)
sct_apply_transfo -i mtr.nii.gz -d $SCT_DIR/data/PAM50/template/PAM50_t2.nii.gz -w warp_mt2template.nii.gz
# Go back to root folder
cd ..
# dmri
# ===========================================================================================
cd dmri
# bring t2 segmentation in dmri space to create mask (no optimization)
sct_maths -i dmri.nii.gz -mean t -o dmri_mean.nii.gz
sct_register_multimodal -i ../t2/t2_seg.nii.gz -d dmri_mean.nii.gz -identity 1 -x nn
# create mask to help moco and for faster processing
sct_create_mask -i dmri_mean.nii.gz -p centerline,t2_seg_reg.nii.gz -size 35mm
# crop data
sct_crop_image -i dmri.nii.gz -m mask_dmri_mean.nii.gz -o dmri_crop.nii.gz
# motion correction
# Tips: if data have very low SNR you can increase the number of successive images that are averaged into group with "-g". Also see: sct_dmri_moco -h
sct_dmri_moco -i dmri_crop.nii.gz -bvec bvecs.txt
# segmentation with propseg
sct_deepseg_sc -i dmri_crop_moco_dwi_mean.nii.gz -c dwi -qc "$SCT_BP_QC_FOLDER"
# Generate QC for sct_dmri_moco ('dmri_crop_moco_dwi_mean_seg.nii.gz' is needed to align each slice in the QC mosaic)
sct_qc -i dmri_crop.nii.gz -d dmri_crop_moco.nii.gz -s dmri_crop_moco_dwi_mean_seg.nii.gz -p sct_dmri_moco -qc "$SCT_BP_QC_FOLDER"
# Register template to dwi
# Tips: Again, here, we prefer to stick to segmentation-based registration. If there are susceptibility distortions in your EPI, then you might consider adding a third step with bsplinesyn or syn transformation for local adjustment.
sct_register_multimodal -i $SCT_DIR/data/PAM50/template/PAM50_t1.nii.gz -iseg $SCT_DIR/data/PAM50/template/PAM50_cord.nii.gz -d dmri_crop_moco_dwi_mean.nii.gz -dseg dmri_crop_moco_dwi_mean_seg.nii.gz -param step=1,type=seg,algo=centermass:step=2,type=seg,algo=bsplinesyn,metric=MeanSquares,smooth=1,iter=3 -initwarp ../t2/warp_template2anat.nii.gz -initwarpinv ../t2/warp_anat2template.nii.gz -qc "$SCT_BP_QC_FOLDER"
# Rename warping fields for clarity
mv warp_PAM50_t12dmri_crop_moco_dwi_mean.nii.gz warp_template2dmri.nii.gz
mv warp_dmri_crop_moco_dwi_mean2PAM50_t1.nii.gz warp_dmri2template.nii.gz
# Warp template and white matter atlas
sct_warp_template -d dmri_crop_moco_dwi_mean.nii.gz -w warp_template2dmri.nii.gz -qc "$SCT_BP_QC_FOLDER"
# Compute DTI metrics
# Tips: The flag -method "restore" allows you to estimate the tensor with robust fit (see: sct_dmri_compute_dti -h)
sct_dmri_compute_dti -i dmri_crop_moco.nii.gz -bval bvals.txt -bvec bvecs.txt
# Compute FA within right and left lateral corticospinal tracts from slices 2 to 14 using weighted average method
sct_extract_metric -i dti_FA.nii.gz -z 2:14 -method wa -l 4,5 -o fa_in_cst.csv
# Bring metric to template space (e.g. for group mapping)
sct_apply_transfo -i dti_FA.nii.gz -d $SCT_DIR/data/PAM50/template/PAM50_t2.nii.gz -w warp_dmri2template.nii.gz
# Go back to root folder
cd ..
# fmri
# ===========================================================================================
cd fmri
# Average all fMRI time series (to be able to do the next step)
sct_maths -i fmri.nii.gz -mean t -o fmri_mean.nii.gz
# Get cord centerline
sct_get_centerline -i fmri_mean.nii.gz -c t2s
# Create mask around the cord to help motion correction and for faster processing
sct_create_mask -i fmri_mean.nii.gz -p centerline,fmri_mean_centerline.nii.gz -size 35mm
# Crop data
sct_crop_image -i fmri.nii.gz -m mask_fmri_mean.nii.gz -o fmri_crop.nii.gz
# Motion correction
# Tips: Here data have sufficient SNR and there is visible motion between two consecutive scans, so motion correction is more efficient with -g 1 (i.e. not average consecutive scans)
sct_fmri_moco -i fmri_crop.nii.gz -g 1
# Segment spinal cord manually
# Since these data have very poor cord/CSF contrast, it is difficult to segment the cord properly using sct_deepseg_sc
# and hence in this case we do it manually. The file is called: fmri_crop_moco_mean_seg_manual.nii.gz
# There is no command for this step, because the file is included in the 'sct_example_data' dataset.
# Generate QC for sct_fmri_moco ('fmri_crop_moco_mean_seg_manual.nii.gz' is needed to align each slice in the QC mosaic)
sct_qc -i fmri_crop.nii.gz -d fmri_crop_moco.nii.gz -s fmri_crop_moco_mean_seg_manual.nii.gz -p sct_fmri_moco -qc "$SCT_BP_QC_FOLDER"
# Register template->fmri
sct_register_multimodal -i $SCT_DIR/data/PAM50/template/PAM50_t2.nii.gz -iseg $SCT_DIR/data/PAM50/template/PAM50_cord.nii.gz -d fmri_crop_moco_mean.nii.gz -dseg fmri_crop_moco_mean_seg_manual.nii.gz -param step=1,type=seg,algo=slicereg,metric=MeanSquares,smooth=2:step=2,type=im,algo=bsplinesyn,metric=MeanSquares,iter=5,gradStep=0.5 -initwarp ../t2/warp_template2anat.nii.gz -initwarpinv ../t2/warp_anat2template.nii.gz -qc "$SCT_BP_QC_FOLDER"
# Rename warping fields for clarity
mv warp_PAM50_t22fmri_crop_moco_mean.nii.gz warp_template2fmri.nii.gz
mv warp_fmri_crop_moco_mean2PAM50_t2.nii.gz warp_fmri2template.nii.gz
# Warp template and spinal levels (here we don't need the WM atlas)
sct_warp_template -d fmri_crop_moco_mean.nii.gz -w warp_template2fmri.nii.gz -a 0 -s 1
# Note, once you have computed fMRI statistics in the subject's space, you can use
# warp_fmri2template.nii.gz to bring the statistical maps on the template space, for group analysis.
cd ..
# Display results (to easily compare integrity across SCT versions)
# ===========================================================================================
set +v
end=`date +%s`
runtime=$((end-start))
echo "~~~" # these are used to format as code when copy/pasting in github's markdown
echo "Version: `sct_version`"
echo "Ran on: `uname -nsr`"
echo "Duration: $(($runtime / 3600))hrs $((($runtime / 60) % 60))min $(($runtime % 60))sec"
echo "---"
# The file `test_batch_processing.py` will output tested values when run as a script
"$SCT_DIR"/python/envs/venv_sct/bin/python "$SCT_DIR"/testing/batch_processing/test_batch_processing.py
echo "~~~"
# Display syntax to open QC report on web browser
echo "To open Quality Control (QC) report on a web-browser, run the following:"
echo "$open_command $SCT_BP_QC_FOLDER/index.html"
|
neuropoly/spinalcordtoolbox
|
batch_processing.sh
|
Shell
|
mit
| 15,689 |
#!/bin/bash
# Parse one Apache vhost config path into DNS/port environment variables.
# NOTE(review): this script uses `continue` at top level, so it only works
# when sourced from inside a loop that iterates over config files and
# exports the current path as ${conf} -- confirm against the caller.

# Absolute path of the vhost config currently being processed.
DWL_USER_DNS_CONF="${conf}";
# 5th slash-separated path component with the ".conf" suffix stripped;
# expected shape: <containerport>_<domain>_<hostport>
DWL_USER_DNS_DATA="`echo ${DWL_USER_DNS_CONF} | awk -F '[/]' '{print $5}' | sed "s|\.conf||g"`";
# Skip (i.e. make the caller's loop skip) files whose name does not match
# <digits-or-X>_<lowercase-domain>_<digits>
if [[ ! ("${DWL_USER_DNS_DATA}" =~ ^[0-9X]+_[a-z.-]+_[0-9]+) ]]; then
continue;
fi
# Middle field: fully qualified domain name of the vhost.
DWL_USER_DNS="`echo ${DWL_USER_DNS_DATA} | awk -F '[_]' '{print $2}'`";
# Last field: port exposed on the host.
DWL_USER_DNS_PORT="`echo ${DWL_USER_DNS_DATA} | awk -F '[_]' '{print $3}'`";
# First field: port inside the container.
DWL_USER_DNS_PORT_CONTAINER="`echo ${DWL_USER_DNS_DATA} | awk -F '[_]' '{print $1}'`";
# Apex domain (last two dot-separated labels), e.g. sub.example.com -> example.com
DWL_USER_DNS_SERVERNAME="`echo \"${DWL_USER_DNS}\" | awk -F '[\.]' '{print $(NF-1)\".\"$NF}'`";
|
davask/d-apache
|
build/dwl/vhost-env.sh
|
Shell
|
mit
| 561 |
#!/usr/bin/env sh
# Build the bundle, transpile every ES6 spec into spec/build/, then run
# the jasmine-node suite. Each stage only runs if the previous succeeded.
webpack -p &&
for file in spec/es6/*.es6.js
do
  # spec/es6/foo.es6.js -> spec/build/foo.js
  # (quote $file/$out so paths with spaces survive word-splitting)
  out="spec/build/$(basename "$file" .es6.js).js"
  ./node_modules/.bin/regenerator "$file" >"$out"
done &&
./node_modules/.bin/jasmine-node spec
|
AppliedMathematicsANU/plexus-csp
|
test.sh
|
Shell
|
mit
| 209 |
#!/bin/bash
# 2016 ROBERT WOLTERMAN (xtacocorex)
# WITH HELP FROM CHIP-hwtest/battery.sh
# MIT LICENSE, SEE LICENSE FILE
# LOGGING HAT-TIP TO http://urbanautomaton.com/blog/2014/09/09/redirecting-bash-script-output-to-syslog/
# THIS NEEDS TO BE RUN AS ROOT
# PROBABLY SET AS A CRON JOB EVERY 5 OR 10 MINUTES
# SIMPLE SCRIPT TO POWER DOWN THE CHIP BASED UPON BATTERY VOLTAGE
# CHANGE THESE TO CUSTOMIZE THE SCRIPT
# ****************************
# ** THESE MUST BE INTEGERS **
MINVOLTAGELEVEL=3000
MINCHARGECURRENT=10
# ****************************
readonly SCRIPT_NAME=$(basename $0)
# Emit a timestamped (UTC) message on stdout.
# "$*" joins the arguments with single spaces, matching echo's behavior.
log() {
  printf '%s\n' "$(date -u) $*"
  # alternative: forward to syslog instead
  #logger -p user.notice -t $SCRIPT_NAME "$@"
}
# TALK TO THE POWER MANAGEMENT
/usr/sbin/i2cset -y -f 0 0x34 0x82 0xC3
# GET POWER OP MODE
POWER_OP_MODE=$(/usr/sbin/i2cget -y -f 0 0x34 0x01)
# SEE IF BATTERY EXISTS
BAT_EXIST=$(($(($POWER_OP_MODE&0x20))/32))
if [ $BAT_EXIST == 1 ]; then
log "CHIP HAS A BATTERY ATTACHED"
BAT_VOLT_MSB=$(/usr/sbin/i2cget -y -f 0 0x34 0x78)
BAT_VOLT_LSB=$(/usr/sbin/i2cget -y -f 0 0x34 0x79)
BAT_BIN=$(( $(($BAT_VOLT_MSB << 4)) | $(($(($BAT_VOLT_LSB & 0x0F)) )) ))
BAT_VOLT_FLOAT=$(echo "($BAT_BIN*1.1)"|bc)
# CONVERT TO AN INTEGER
BAT_VOLT=${BAT_VOLT_FLOAT%.*}
# GET THE CHARGE CURRENT
BAT_ICHG_MSB=$(/usr/sbin/i2cget -y -f 0 0x34 0x7A)
BAT_ICHG_LSB=$(/usr/sbin/i2cget -y -f 0 0x34 0x7B)
BAT_ICHG_BIN=$(( $(($BAT_ICHG_MSB << 4)) | $(($(($BAT_ICHG_LSB & 0x0F)) )) ))
BAT_ICHG_FLOAT=$(echo "($BAT_ICHG_BIN*0.5)"|bc)
# CONVERT TO AN INTEGER
BAT_ICHG=${BAT_ICHG_FLOAT%.*}
# CHECK BATTERY LEVEL AGAINST MINVOLTAGELEVEL
if [ $BAT_VOLT -le $MINVOLTAGELEVEL ]; then
log "CHIP BATTERY VOLTAGE IS LESS THAN $MINVOLTAGELEVEL"
log "CHECKING FOR CHIP BATTERY CHARGING"
# IF CHARGE CURRENT IS LESS THAN MINCHARGECURRENT, WE NEED TO SHUTDOWN
if [ $BAT_ICHG -le $MINCHARGECURRENT ]; then
log "CHIP BATTERY IS NOT CHARGING, SHUTTING DOWN NOW"
shutdown -h now
else
log "CHIP BATTERY IS CHARGING"
fi
else
log "CHIP BATTERY LEVEL IS GOOD"
fi
if [ $BAT_ICHG -le $MINCHARGECURRENT ]; then
log "CHIP IS ON BATTERY POWER"
else
log "CHIP IS ON WALL POWER"
fi
fi
|
xtacocorex/chip_batt_autoshutdown
|
chip_autoshutdown.sh
|
Shell
|
mit
| 2,301 |
#!/bin/bash
# Launch stunnel using a read-only mounted configuration directory.
# Sanity checks: the config file must exist and /cfg must not be writable.

# Fail fast if the expected config file is missing.
# Fix: the previous message interpolated an undefined $file variable and
# printed "file /cfg/ is missing".
if ! [ -e "/cfg/stunnel.conf" ]; then
    echo "Error: file /cfg/stunnel.conf is missing"
    exit 1
fi

# /cfg is expected to be mounted read-only; a successful write means a
# misconfiguration. Remove the probe file so it is never left behind.
if touch /cfg/test.file 2>/dev/null; then
    rm -f /cfg/test.file
    echo "Error: /cfg should be mounted read-only"
    exit 1
fi

# Replace the shell with stunnel so signals are delivered directly.
exec /usr/bin/stunnel4 /cfg/stunnel.conf
|
shaneoc/dockerfiles
|
images/stunnel/start.sh
|
Shell
|
mit
| 260 |
# Provision an Ubuntu dev machine: desktop, Oracle JDK 8, Maven, NetBeans 8.2.
sudo apt-get update
sudo apt-get install ubuntu-desktop
# JAVA JDK
# add-apt-repository needs python-software-properties on older Ubuntu.
sudo apt-get install python-software-properties
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java8-installer
sudo apt install oracle-java8-set-default
# MAVEN
sudo apt-get install maven
# Netbeans 8.2
sudo apt-get install wget
sudo wget http://download.netbeans.org/netbeans/8.2/final/bundles/netbeans-8.2-javaee-linux.sh
# The installer reads JAVA_HOME to locate the JDK.
JAVA_HOME=/usr/lib/jvm/java-8-oracle
sudo sh ./netbeans-8.2-javaee-linux.sh
|
krandalf75/linux-utils
|
dev-install.sh
|
Shell
|
mit
| 516 |
#!/bin/bash
#
# [!] Required fix for s3cmd 1.5.0 on OS X
#
# Ensures /usr/bin/python2 exists by linking it to python2.7, so tools
# expecting a "python2" binary keep working.
echo " => Creating symlink for python to as 'python2' from 'python2.7'"
if [ -x '/usr/bin/python2' ] ; then
  echo " (i) /usr/bin/python2 already exists and executable - no fix needed"
  exit 0
fi
echo " (i) No /usr/bin/python2 executable found - creating symlink from python2.7"
# Abort if /usr/bin is not reachable; otherwise ln would run elsewhere.
cd /usr/bin || exit 1
# Fix: plain 'ln' creates a HARD link; '-s' creates the symlink the
# script advertises (and survives python2.7 being replaced in-place).
sudo ln -s python2.7 python2
exit $?
|
bazscsa/steps-amazon-s3-deploy
|
__s3cmd_osx_fix.sh
|
Shell
|
mit
| 387 |
#!/bin/bash
# Install the freshly-built add-in into Xamarin Studio's local add-in folder.
ADDIN_DIR="$HOME/Library/Application Support/XamarinStudio-6.0/LocalInstall/Addins"
mkdir -p "$ADDIN_DIR"
# Fix: -f keeps a first-time install (file not yet present) from printing
# an error and returning a non-zero status.
rm -f "$ADDIN_DIR/unity-mimetypes.dll"
cp -p bin/Debug/unity-mimetypes.dll "$ADDIN_DIR/"
ls -l "$ADDIN_DIR/"
|
jools-adams/monodevelop-unity-mimetypes
|
install.sh
|
Shell
|
mit
| 352 |
#!/usr/bin/env bash
# cd /home/gpu/db/Dropbox/dev2/cpp/gpgpu/Data-Science-PyCUDA-GPU/docker
# Start the PyCUDA container with GPU access, publishing the notebook/dask
# ports and mounting ~/dev as a shared folder inside the container.
nvidia-docker run -it -p 5555:5555 -p 7842:7842 -p 8787:8787 -p 8786:8786 -p 8788:8788 -v ~/dev/:/root/sharedfolder quantscientist/pycuda bash
|
QuantScientist/Deep-Learning-Boot-Camp
|
docker/run_docker_gpu.sh
|
Shell
|
mit
| 237 |
# Usage:
#
# $ brew update
# You should execute this first to update everything locally.
#
# $ brew-cask.sh [update]
# This will list all of your cask packages and rather there is an upgrade
# pending with a ✔ checkmark, just like Homebrew does with "brew update".
# The update command is optional, as it doesn't actually do any tracking, there's
# not really anything to "update" with cask. But it keeps with the pattern of
# of Homebrew's "brew update" pattern for those with memory muscle fingers (like me).
#
# $ brew-cask.sh upgrade
# This performs a "brew cask install <cask> --force" of all cask packages that have
# an update pending.
#
# This code was inspired by http://stackoverflow.com/a/36000907/56693

# get the list of installed casks
casks=( $(brew cask list) )
caskroom=/usr/local/Caskroom
if [[ "$1" == "upgrade" ]]; then
  for cask in ${casks[@]}; do
    # First line of `brew cask info` carries the latest version.
    current="$(brew cask info $cask | sed -n '1p' | sed -n 's/^.*: \(.*\)$/\1/p')"
    # Installed versions are the sub-directories in the caskroom.
    installed=( $(ls $caskroom/$cask))
    if (! [[ " ${installed[@]} " == *" $current "* ]]); then
      echo "Upgrading $cask to v$current."
      (set -x; brew cask install $cask --force;)
    else
      echo "$cask is up-to-date, skipping."
    fi
  done
else
  echo "Inspecting ${#casks[@]} casks. Use 'brew-cask.sh upgrade' to perform any updates."
  # Fix: the loop previously started at "i = i" (self-assignment of an
  # uninitialized variable); start the index at 0 explicitly.
  for (( i = 0 ; i < ${#casks[@]} ; i++ )); do
    current="$(brew cask info ${casks[$i]} | sed -n '1p' | sed -n 's/^.*: \(.*\)$/\1/p')"
    installed=( $(ls $caskroom/${casks[$i]}))
    if (! [[ " ${installed[@]} " == *" $current "* ]]); then
      # Append a green check mark to casks with a pending upgrade.
      casks[$i]="${casks[$i]}$(tput sgr0)$(tput setaf 2) ✔$(tput sgr0)"
    fi
  done
  echo " ${casks[@]/%/$'\n'}"
fi
|
rm-hull/dotfiles
|
bin/brew-cask.sh
|
Shell
|
mit
| 1,706 |
#!/bin/sh
# Minimal whiptail example: an 8-row by 78-column message box that blocks
# until the user presses OK.
whiptail --title "Example Dialog" --msgbox "This is an example of a message box. You must hit OK to continue." 8 78
|
jeremiedecock/snippets
|
shell/whiptail/message_box.sh
|
Shell
|
mit
| 127 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-MacOSX
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=dylib
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/passwordmanager
OUTPUT_BASENAME=passwordmanager
PACKAGE_TOP_DIR=passwordmanager/
# Functions
# Abort the whole script with the status of the most recent command when
# that command failed; otherwise do nothing.
checkReturnCode()
{
    rc=$?
    case "$rc" in
        0) ;;
        *) exit "$rc" ;;
    esac
}
# Create a directory (including parents), optionally applying a mode.
#   $1 - directory path
#   $2 - permission (optional)
makeDirectory()
{
    mkdir -p "$1"
    checkReturnCode
    if [ -n "$2" ]; then
        chmod $2 "$1"
        checkReturnCode
    fi
}
# Copy a file, optionally applying a mode to the destination.
#   $1 - from-file path
#   $2 - to-file path
#   $3 - permission (optional)
copyFileToTmpDir()
{
    cp "$1" "$2"
    checkReturnCode
    if [ -n "$3" ]; then
        chmod $3 "$2"
        checkReturnCode
    fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
# Stage the built binary under the package layout (bin/ inside top dir).
cd "${TOP}"
makeDirectory "${NBTMPDIR}/passwordmanager/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/passwordmanager.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/passwordmanager.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
DanielWieczorek/PasswordManager
|
nbproject/Package-Release.bash
|
Shell
|
mit
| 1,493 |
################################################################################
# https://github.com/relsqui/doily
#
# (c) 2017 YOUR NAME HERE
# You are free to use, copy, modify, etc. this by the terms of the MIT license.
# See included LICENSE.txt for details.
################################################################################
# This should be the same as the filename of the plugin, minus the .sh part.
PLUGIN_NAME="word_count"
PLUGIN_AUTHOR="Finn Ellis <[email protected]>"
PLUGIN_VERSION="v0.1.0"
PLUGIN_DESCRIPTION="Counts words in all of your dailies."
# Read the entire here-document into PLUGIN_HELP. Fix: the previous
# delimiter of ' ' stopped at the first space, truncating the help text
# to the single word "Use". A NUL delimiter (-d '') never occurs in the
# text, so read consumes everything up to EOF; it then returns non-zero,
# hence the || true guard. -r preserves the escaped backticks.
read -r -d '' PLUGIN_HELP <<EOF || true
Use the \`doily wc\` command to print a listing of how many words are in your
daily files. They're listed chronologically; to see which days you wrote the
most on, pipe the output to sort, such as: \`doily wc | sort -n\`
EOF
# At which points should this plugin be activated?
PROVIDES_PRE_WRITE=false
PROVIDES_POST_WRITE=false
# What commands does this plugin enable?
PROVIDES_COMMANDS=( wc )
# Dispatch a plugin command to the function implementing it.
# $1 is the command name; all remaining arguments are forwarded verbatim.
# (If PROVIDES_COMMANDS is set to ( ), this dispatcher can be deleted.)
call_command() {
    comm="$1"
    shift
    if [[ "${comm}" == "wc" ]]; then
        count_words "$@"
    fi
}
count_words() {
    # Implements `doily wc`: per-file word counts plus a total line.
    # DAILIES is provided by the sourced Doily config and points at the
    # directory of daily files.
    cd "${DAILIES}"
    wc -w *
    cd - >/dev/null
}
main() {
    ############################################################################
    # DO NOT EDIT THIS. Edit the variables and functions above instead!
    #
    # Responds to requests from the Doily main script with either information
    # about the plugin or an action to be taken in response to a hook or a
    # command entered by the user.
    #
    # Globals:
    #   - CONF_FILE (exported by Doily)
    #   - PLUGIN_NAME
    #   - PROVIDES_PRE_WRITE
    #   - PROVIDES_POST_WRITE
    #   - PROVIDES_COMMANDS
    # Args:
    #   - The type of information requested by Doily. This can be either:
    #     . The name of a hook ("pre_write" or "post_write"), OR
    #     . The string "command" followed by a command name and arguments, OR
    #     . The string "provides" followed by either "pre_write",
    #       "post_write", or "commands".
    # Returns:
    #   - None.
    ############################################################################
    source "${CONF_FILE}"
    # An optional plugin-specific config may override the defaults above.
    config="${XDG_CONFIG_HOME:-$HOME/.config}/doily/plugins/${PLUGIN_NAME}.conf"
    if [[ -e "${config}" ]]; then
        source "${config}"
    fi
    case "$1" in
        pre_write) pre_write ;;
        post_write) post_write ;;
        call_command) shift; call_command "$@" ;;
        provides)
            shift
            case "$1" in
                pre_write) $PROVIDES_PRE_WRITE && return 0 || return 1 ;;
                post_write) $PROVIDES_POST_WRITE && return 0 || return 1 ;;
                # printf allows us to easily quote multi-word command names.
                commands) echo $(printf "'%s' " "${PROVIDES_COMMANDS[@]}") ;;
            esac
            ;;
    esac
}
# Self-execute only when run directly; when sourced by Doily the host
# invokes main itself.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
    main "$@"
fi
|
relsqui/doily
|
plugins/word_count.sh
|
Shell
|
mit
| 3,174 |
#!/bin/bash
# Author: Yunkai Cui
# License: LICENSE
# Description:
# This script is used to setup a python development environment in vim
# Run the script by:
# $: ./vimsetup.sh [options]
# options:
# -c Compile with llvm to have c family language auto completion support
# Get vundle
mkdir -p ~/.vim/bundle/
git clone https://github.com/gmarik/vundle.git ~/.vim/bundle/vundle/
# Added the lines to vimrc to support python integration
cat vimconf >> ~/.vimrc
# Install Powerline font
git clone https://github.com/powerline/fonts.git powerlinefont/
cd powerlinefont/
./install.sh
cd ..
rm -rf powerlinefont
# Install bundles
vim +PluginInstall +qall
# build YCM
mkdir ycm_build
# With -c: download prebuilt clang+llvm and point YCM's cmake at it for
# c-family semantic completion.
if [ $# -eq 1 ] && [ "$1" = "-c" ]; then
	curl -SL http://llvm.org/releases/3.6.0/clang+llvm-3.6.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz | tar xJ
	cd ycm_build
	cmake -G "Unix Makefiles" -DPATH_TO_LLVM_ROOT=../clang+llvm-3.6.0-x86_64-linux-gnu . ~/.vim/bundle/YouCompleteMe/third_party/ycmd/cpp
else
	cd ycm_build
	# with no c language semantic support
	cmake -G "Unix Makefiles" . ~/.vim/bundle/YouCompleteMe/third_party/ycmd/cpp
fi
make ycm_support_libs
cd ..
rm -r ycm_build
exit 0
|
princeedward/vim_python_setup
|
vimsetup.sh
|
Shell
|
mit
| 1,173 |
#!/bin/bash
# Build the local meshblu-benchmark image, then run the message-webhook
# benchmark against a meshblu instance reachable from the container.
docker build -t local/meshblu-benchmark . && \
docker run \
  --rm \
  --publish 80:80 \
  --env MESHBLU_SERVER=172.17.8.1 \
  --env MESHBLU_PORT=3000 \
  local/meshblu-benchmark \
    message-webhook \
      --host 172.17.8.101 \
      --port 80 \
      --number-of-times 100
|
octoblu/meshblu-benchmark
|
run.sh
|
Shell
|
mit
| 310 |
#!/usr/bin/env bash
# Install the track's tooling: PHPUnit 9 and PHP_CodeSniffer.
set -euo pipefail
./bin/install-phpunit-9.sh
./bin/install-phpcs.sh
|
exercism/xphp
|
bin/install.sh
|
Shell
|
mit
| 90 |
###########################
### BOOTSTRAP FUNCTIONS ###
###########################
# Source every lib/system/*.sh file in lexical (numbered) order, recording
# each absolute path in __oo__importedFiles. Aborts on the first file that
# fails to load.
System.Bootstrap(){
    local file
    local path
    for file in "$__oo__libPath"/system/*.sh
    do
        path="$(File.GetAbsolutePath "$file")"
        __oo__importedFiles+=( "$path" )
        ## note: aliases are visible inside functions only if
        ## they were initialized AFTER they were created
        ## this is the reason why we have to load lib/system/* in a specific order (numbers)
        if ! source "$path"
        then
            # Fix: report the file that actually failed; the old message
            # interpolated an undefined $libPath variable.
            cat <<< "FATAL ERROR: Unable to bootstrap (loading $path)" 1>&2
            exit 1
        fi
    done
}
File.GetAbsolutePath() {
    # http://stackoverflow.com/questions/3915040/bash-fish-command-to-print-absolute-path-to-a-file
    # $1 : relative filename
    # Prints the absolute path of $1. Fix: the previous version tested and
    # echoed the CALLER'S $file variable instead of $1, so it only worked
    # when the caller happened to pass a variable named "file".
    if [[ "$1" == "/"* ]]
    then
        echo "$1"
    else
        echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
    fi
}
########################
### INITIALZE SYSTEM ###
########################
# From: http://wiki.bash-hackers.org/scripting/debuggingtips
export PS4='+(${BASH_SOURCE[1]##*/}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
# Bash will remember & return the highest exitcode in a chain of pipes.
# This way you can catch the error inside pipes, e.g. mysqldump | gzip
set -o pipefail
shopt -s expand_aliases
# Resolve the lib directory from this file's location.
declare -g __oo__libPath="$( cd "${BASH_SOURCE[0]%/*}" && pwd )"
declare -g __oo__path="${__oo__libPath}/.."
declare -ag __oo__importedFiles
System.Bootstrap
#########################
### HANDLE EXCEPTIONS ###
#########################
# Route every command failure through command_not_found_handle so the
# framework can build an exception/backtrace.
trap "__EXCEPTION_TYPE__=\"\$_\" command_not_found_handle \$BASH_COMMAND" ERR
set -o errtrace # trace ERR through 'time command' and other functions
|
mikelane/bash-oo-framework
|
lib/oo-framework.sh
|
Shell
|
mit
| 1,740 |
#!/bin/sh
# Install workbench dependencies: global ember-cli plus the project's own
# npm and bower packages. The project directory is read from line 5 of the
# service-info file.
SERVICEFILE=./service-info.txt
# sed '5q;d': delete every line but quit right after printing line 5.
SRC=$(sed '5q;d' "$SERVICEFILE")
echo "Setting up project:"
echo "    * installing local dependencies: (cd $SRC && npm install && bower install)"
sudo npm install ember-cli -g
# Fix: quote "$SRC" so a path containing spaces does not word-split.
(cd "$SRC" && npm install && bower install --allow-root)
|
DURAARK/workbench-ui
|
_devops/setup.sh
|
Shell
|
mit
| 271 |
#!/usr/bin/env bash
# Generate a changelog covering the range between the two most recent
# version tags (sorted by semantic version order).
IFS=":" read -r previous current < <(git tag --sort 'version:refname' | tail -n2 | paste -sd':' -)
ROOT_DIR="$(git rev-parse --show-toplevel)"
npx ts-node "${ROOT_DIR}/bin/gen-changelog.ts" --from "${previous}" --to "${current}"
|
unbounce/iidy
|
bin/gen-changelog.sh
|
Shell
|
mit
| 249 |
#!/bin/bash
# Create the rugby database if it does not exist, load the schema, then
# bulk-load the CSV game data through a staging directory in /tmp.

# Count matching databases; --tuples-only prints just the number.
db_exists=$(psql template1 --tuples-only --command "select count(*) from pg_database where datname = 'rugby';")

# Fix: invoke the commands directly instead of building strings and using
# eval, which is fragile and quoting-hostile.
if [ $db_exists -eq 0 ] ; then
    createdb rugby
fi

psql rugby -f schema/create_schema.sql

# Stage the CSVs and concatenate them into a single load file.
mkdir /tmp/data
cp csv/*.csv /tmp/data
#dos2unix /tmp/data/*
#tail -q -n+2 /tmp/data/*.csv >> /tmp/games.csv
cat /tmp/data/*.csv >> /tmp/games.csv
#sed -e 's/$/,,/' -i /tmp/games.csv
psql rugby -f loaders/load_games.sql

# Clean up the staging files.
rm /tmp/data/*.csv
rmdir /tmp/data
rm /tmp/games.csv
|
octonion/rugby
|
pro14/scripts/load.sh
|
Shell
|
mit
| 533 |
#!/bin/bash
# Disable SSL for nginx vhosts whose domains are no longer present in the
# Keitaro database (and are not custom add-site configs).
set -o pipefail -e

config_dir=${1-/etc/nginx/conf.d}
mysql_host=${2-localhost}
mysql_database=${3-keitaro}

echo "Using $config_dir directory for searching configs"
cd "$config_dir/" || return
# Searching for configs generated by enable-ssl
grep -l "enable-ssl" -- *.conf \
  | sed 's/.conf//g' \
  > domains_with_ssl_certs.txt || sleep 1000
# Getting list of domains in DB
mysql -h "$mysql_host" "$mysql_database" -sNe "SELECT name FROM keitaro_domains ORDER BY name" \
  | uniq \
  > domains_to_keep.txt
# Searching for custom domains
(grep -l "add-site" -- *.conf || echo "") | sed 's/.conf//g' >> domains_to_keep.txt
# Exclude domains exists in domains_to_keep.txt
comm -23 <(sort domains_with_ssl_certs.txt) <(sort domains_to_keep.txt) \
  > domains_to_delete.txt
numbers=$(wc -l < domains_to_delete.txt)
echo -n "Disabling $numbers ssl configs"
# Call kctl-disable-ssl for each domain
while read -r domain; do
  # We must be absolutely sure that we don't delete exists domain.
  # Fix: the old form `grep '1' && exit 1 && echo ...` could never print
  # the diagnostic (exit ran first) and matched any count containing "1".
  domain_count=$(mysql -h "$mysql_host" -sN \
    -e "select count(*) from ${mysql_database}.keitaro_domains where name = '${domain}'")
  if [ "${domain_count}" != "0" ]; then
    echo "Attempt to delete exists domain ${domain}"
    exit 1
  fi
  if [ -z "$DRY_MODE" ]; then
    kctl-disable-ssl -D "$domain"
  else
    echo "kctl-disable-ssl -D $domain"
  fi
done <domains_to_delete.txt
# Cleaning
rm domains_with_ssl_certs.txt domains_to_keep.txt domains_to_delete.txt
|
keitarocorp/centos_provision
|
scripts/prune-ssl.sh
|
Shell
|
mit
| 1,408 |
#!/bin/sh
# CocoaPods-generated "embed frameworks" build-phase script: copies each pod
# framework into the app bundle, strips invalid architectures and re-signs
# when the build settings require it.
set -e

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"

# Copy one framework (resolving symlinks) into the app's Frameworks folder,
# then strip/resign as needed.
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
      echo "Symlinked..."
      source="$(readlink "${source}")"
  fi

  # use filter instead of exclude so missing patterns dont' throw errors
  echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}

# Signs a framework with the provided identity
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identitiy
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
    /usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
  fi
}

# Strip invalid architectures
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current file
  archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
  stripped=""
  for arch in $archs; do
    if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary" || exit 1
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
}

# Embed the pod framework for both configurations.
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "$BUILT_PRODUCTS_DIR/ExhibitionSwift/ExhibitionSwift.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "$BUILT_PRODUCTS_DIR/ExhibitionSwift/ExhibitionSwift.framework"
fi
|
eligreg/ExhibitionSwift
|
Example/Pods/Target Support Files/Pods-ExhibitionSwift_Example/Pods-ExhibitionSwift_Example-frameworks.sh
|
Shell
|
mit
| 3,637 |
#!/usr/bin/env bash
# Intentionally manual: prints the steps for publishing the cookbook to the
# Chef Supermarket (credentials make automation impractical here).
echo "Don't know how to automate this (creds), so here are the instructions:"
echo ""
echo "cut a github release, checkout that tag locally"
echo ""
echo "use berkshelf to vendor a clean version of the cookbook, and update to supermarket"
echo ""
echo "berks vendor cookbooks"
echo 'knife cookbook site share anaconda "Programming Languages" --cookbook-path cookbooks --config ${CHEF_PUB}'
echo ""
echo "https://docs.getchef.com/knife_cookbook_site.html#share"
echo "BUMP METADATA.RB for next release! master is development"
echo ""
echo "https://supermarket.chef.io/cookbooks/anaconda"
|
nareynolds/vagrant-chef-solo-anaconda3
|
cookbooks/anaconda/script/release-to-supermarket.sh
|
Shell
|
mit
| 612 |
# -*- shell-script -*-
# "set autolist" debugger command
#
# Copyright (C) 2010-2011, 2016 Rocky Bernstein <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place, Suite 330, Boston,
# MA 02111 USA.
_Dbg_help_add_sub set autolist \
'**set autolist** [**on**|**off**]
Run list command automatically every time the debugger enters
See also:
---------
**show autolist**' 1

# Tab-completion for "set autolist" offers on/off.
_Dbg_next_complete[set autolist]='_Dbg_complete_onoff'

# Toggle automatic "list" on debugger entry by (un)registering the list
# hook in _Dbg_cmdloop_hooks; with no argument, defaults to "off".
_Dbg_do_set_autolist() {
    typeset onoff=${1:-'off'}
    case $onoff in
	on | 1 )
	    _Dbg_write_journal_eval "_Dbg_cmdloop_hooks[list]=_Dbg_do_list"
	    ;;
	off | 0 )
	    _Dbg_write_journal_eval "unset '_Dbg_cmdloop_hooks[list]'"
	    # NOTE(review): the hook is unset again directly here after the
	    # journaled unset above -- presumably redundant; confirm before
	    # removing.
	    unset '_Dbg_cmdloop_hooks[list]'
	    ;;
	* )
	    _Dbg_errmsg "\"on\" or \"off\" expected."
	    return 1
    esac
    _Dbg_do_show 'autolist'
    return 0
}
|
rogalmic/vscode-bash-debug
|
bashdb_dir/command/set_sub/autolist.sh
|
Shell
|
mit
| 1,481 |
# src/bash/pgsql-runner/funcs/print-usage.test.sh
# v1.0.9
# ---------------------------------------------------------
# Displays the expected usage text for the print-usage test case.
# Relies on doLog being provided by the framework that sources this file.
# ---------------------------------------------------------
doTestPrintUsage(){
	doLog "DEBUG START doTestPrintUsage"
	cat doc/txt/pgsql-runner/tests/print-usage.test.txt
	# Pause so the tester can read the output.
	sleep 2
	# add your action implementation code here ...
	doLog "DEBUG STOP doTestPrintUsage"
}
# eof func doTestPrintUsage
# eof file: src/bash/pgsql-runner/tests/print-usage.test.sh
|
YordanGeorgiev/pgsql-runner
|
src/bash/pgsql-runner/tests/print-usage.test.sh
|
Shell
|
mit
| 525 |
#!/usr/bin/env bash
# Rewrite the whole history reachable from HEAD so that every commit is
# authored and committed by David Qu.
# WARNING: this is a history rewrite; pushing afterwards requires --force.
git filter-branch -f --env-filter "
GIT_AUTHOR_NAME='David Qu'
GIT_AUTHOR_EMAIL='[email protected]'
GIT_COMMITTER_NAME='David Qu'
GIT_COMMITTER_EMAIL='[email protected]'
" HEAD
|
dqu123/RoboTrike
|
set_all_commit_authors.sh
|
Shell
|
mit
| 225 |
#!/bin/bash
# Rebuild next's pre-compiled (ncc) bundles; fail the build if doing so
# produced uncommitted changes, meaning the checked-in output is stale.
yarn --cwd packages/next/bundles
cp packages/next/bundles/node_modules/webpack5/lib/hmr/HotModuleReplacement.runtime.js packages/next/bundles/webpack/packages/
cp packages/next/bundles/node_modules/webpack5/lib/hmr/JavascriptHotModuleReplacement.runtime.js packages/next/bundles/webpack/packages/
yarn --cwd packages/next ncc-compiled

# Make sure to exit with 1 if there are changes after running ncc-compiled
# step to ensure we get any changes committed
# (idiom fix: -n with a quoted substitution instead of ! -z unquoted)
if [[ -n "$(git status -s)" ]]; then
  echo "Detected changes"
  git status
  exit 1
fi
|
flybayer/next.js
|
check-pre-compiled.sh
|
Shell
|
mit
| 558 |
#!/usr/bin/env bash
#
# BespokeView Consulting, Inc.
# <[email protected]>
#
# [email protected]:ryanjohnston/dotfiles.git

# Run from the repository root regardless of invocation directory.
cd "$(dirname "$0")/.."
DOTFILES_ROOT=$(pwd -P)
POWERLINE_ROOT="/usr/local/lib/python2.7/site-packages"

set -e

echo ''

source script/common.sh

# macOS only: install powerline-status via pip2 unless already present.
# (info is provided by script/common.sh)
if [ "$(uname -s)" == "Darwin" ]
then
  if [ -d "$POWERLINE_ROOT/powerline" ]
  then
    info 'Powerline already installed.'
  else
    info 'Installing powerline...'
    pip2 install psutil
    pip2 install powerline-status
    info 'Powerline installed.'
  fi
fi
|
ryanjohnston/dotfiles
|
script/powerline.sh
|
Shell
|
mit
| 538 |
#!/bin/bash
# LinuxGSM command_dev_detect_deps.sh function
# Author: Daniel Gibbs
# Website: https://linuxgsm.com
# Description: Detects dependencies the server binary requires.

commandname="DEV-DETECT-DEPS"
commandaction="Developer detect deps"
functionselfname="$(basename "$(readlink -f "${BASH_SOURCE[0]}")")"
fn_firstcommand_set

# Record that the current library maps to the given distro packages and
# mark it as recognised. Replaces ~20 copies of the same 4-line pattern.
#   $1 - CentOS package   $2 - Ubuntu package   $3 - Debian package
fn_lib_to_deps(){
	echo -e "${1}" >> "${tmpdir}/.depdetect_centos_list"
	echo -e "${2}" >> "${tmpdir}/.depdetect_ubuntu_list"
	echo -e "${3}" >> "${tmpdir}/.depdetect_debian_list"
	libdetected=1
}

echo -e "================================="
echo -e "Dependencies Checker"
echo -e "================================="
echo -e "Checking directory: "
echo -e "${serverfiles}"

# Prefer elfutils' eu-readelf; fall back to binutils readelf.
if [ "$(command -v eu-readelf 2>/dev/null)" ]; then
	readelf=eu-readelf
elif [ "$(command -v readelf 2>/dev/null)" ]; then
	readelf=readelf
else
	echo -e "readelf/eu-readelf not installed"
fi

# Gather the NEEDED (shared library) entries of every file under
# serverfiles. The library name is column 4 for eu-readelf, column 5
# for readelf; brackets are stripped.
files=$(find "${serverfiles}" | wc -l)
find "${serverfiles}" -type f -print0 |
while IFS= read -r -d $'\0' line; do
	if [ "${readelf}" == "eu-readelf" ]; then
		${readelf} -d "${line}" 2>/dev/null | grep NEEDED | awk '{ print $4 }' | sed 's/\[//g;s/\]//g' >> "${tmpdir}/.depdetect_readelf"
	else
		${readelf} -d "${line}" 2>/dev/null | grep NEEDED | awk '{ print $5 }' | sed 's/\[//g;s/\]//g' >> "${tmpdir}/.depdetect_readelf"
	fi
	# Progress counter (carriage return keeps it on one line).
	echo -n "${i} / ${files}" $'\r'
	((i++))
done

sort "${tmpdir}/.depdetect_readelf" | uniq > "${tmpdir}/.depdetect_readelf_uniq"

touch "${tmpdir}/.depdetect_centos_list"
touch "${tmpdir}/.depdetect_ubuntu_list"
touch "${tmpdir}/.depdetect_debian_list"

# Map each unique library to distro packages; anything unmapped is
# reported as an unknown shared library.
while read -r lib; do
	echo -e "${lib}"
	case "${lib}" in
		libm.so.6|libc.so.6|libtcmalloc_minimal.so.4|libpthread.so.0|libdl.so.2|libnsl.so.1|libgcc_s.so.1|librt.so.1|ld-linux.so.2|libdbus-glib-1.so.2|libgio-2.0.so.0|libglib-2.0.so.0|libGL.so.1|libgobject-2.0.so.0|libnm-glib.so.4|libnm-util.so.2)
			fn_lib_to_deps "glibc.i686" "lib32gcc1" "lib32gcc1";;
		libawt.so|libjava.so|libjli.so|libjvm.so|libnet.so|libnio.so|libverify.so)
			fn_lib_to_deps "java-1.8.0-openjdk" "default-jre" "default-jre";;
		libtier0.so|libtier0_srv.so|libvstdlib_srv.so|Core.so|libvstdlib.so|libtier0_s.so|Editor.so|Engine.so|liblua.so|libsteam_api.so|ld-linux-x86-64.so.2|libPhysX3_x86.so|libPhysX3Common_x86.so|libPhysX3Cooking_x86.so)
			# Known shared libs what dont requires dependencies.
			libdetected=1;;
		libstdc++.so.6)
			fn_lib_to_deps "libstdc++.i686" "libstdc++6:i386" "libstdc++6:i386";;
		libstdc++.so.5)
			fn_lib_to_deps "compat-libstdc++-33.i686" "libstdc++5:i386" "libstdc++5:i386";;
		libcurl-gnutls.so.4)
			fn_lib_to_deps "libcurl.i686" "libcurl4-gnutls-dev:i386" "libcurl4-gnutls-dev:i386";;
		libspeex.so.1|libspeexdsp.so.1)
			fn_lib_to_deps "speex.i686" "speex:i386" "speex:i386";;
		./libSDL-1.2.so.0|libSDL-1.2.so.0)
			fn_lib_to_deps "SDL.i686" "libsdl1.2debian" "libsdl1.2debian";;
		libtbb.so.2)
			fn_lib_to_deps "tbb.i686" "libtbb2" "libtbb2";;
		libXrandr.so.2)
			fn_lib_to_deps "libXrandr" "libxrandr2" "libxrandr2";;
		libXext.so.6)
			fn_lib_to_deps "libXext" "libxext6" "libxext6";;
		libXtst.so.6)
			fn_lib_to_deps "libXtst" "libxtst6" "libxtst6";;
		libpulse.so.0)
			fn_lib_to_deps "pulseaudio-libs" "libpulse0" "libpulse0";;
		libopenal.so.1)
			# No CentOS package was identified originally; the empty
			# entry is kept to preserve the original output.
			fn_lib_to_deps "" "libopenal1" "libopenal1";;
		libgconf-2.so.4)
			fn_lib_to_deps "GConf2" "libgconf2-4" "libgconf2-4";;
		libz.so.1)
			fn_lib_to_deps "zlib" "zlib1g" "zlib1g";;
		libatk-1.0.so.0)
			fn_lib_to_deps "atk" "libatk1.0-0" "libatk1.0-0";;
		libcairo.so.2)
			fn_lib_to_deps "cairo" "libcairo2" "libcairo2";;
		libfontconfig.so.1)
			fn_lib_to_deps "fontconfig" "libfontconfig1" "libfontconfig1";;
		libfreetype.so.6)
			fn_lib_to_deps "freetype" "libfreetype6" "libfreetype6";;
	esac
	if [ "${libdetected}" != "1" ]; then
		unknownlib=1
		echo -e "${lib}" >> "${tmpdir}/.depdetect_unknown"
	fi
	unset libdetected
done < "${tmpdir}/.depdetect_readelf_uniq"

sort "${tmpdir}/.depdetect_centos_list" | uniq >> "${tmpdir}/.depdetect_centos_list_uniq"
sort "${tmpdir}/.depdetect_ubuntu_list" | uniq >> "${tmpdir}/.depdetect_ubuntu_list_uniq"
sort "${tmpdir}/.depdetect_debian_list" | uniq >> "${tmpdir}/.depdetect_debian_list_uniq"
if [ "${unknownlib}" == "1" ]; then
	sort "${tmpdir}/.depdetect_unknown" | uniq >> "${tmpdir}/.depdetect_unknown_uniq"
fi

# Flatten each package list onto a single line for display.
awk -vORS='' '{ print $1,$2 }' "${tmpdir}/.depdetect_centos_list_uniq" > "${tmpdir}/.depdetect_centos_line"
awk -vORS='' '{ print $1,$2 }' "${tmpdir}/.depdetect_ubuntu_list_uniq" > "${tmpdir}/.depdetect_ubuntu_line"
awk -vORS='' '{ print $1,$2 }' "${tmpdir}/.depdetect_debian_list_uniq" > "${tmpdir}/.depdetect_debian_line"

echo -e ""
echo -e ""
echo -e "Required Dependencies"
echo -e "================================="
echo -e "${executable}"
echo -e ""
echo -e "CentOS"
echo -e "================================="
cat "${tmpdir}/.depdetect_centos_line"
echo -e ""
echo -e ""
echo -e "Ubuntu"
echo -e "================================="
cat "${tmpdir}/.depdetect_ubuntu_line"
echo -e ""
echo -e ""
echo -e "Debian"
echo -e "================================="
cat "${tmpdir}/.depdetect_debian_line"
echo -e ""
if [ "${unknownlib}" == "1" ]; then
	echo -e ""
	echo -e "Unknown shared Library"
	echo -e "================================="
	cat "${tmpdir}/.depdetect_unknown"
fi
echo -e ""
echo -e "Required Librarys"
echo -e "================================="
sort "${tmpdir}/.depdetect_readelf" | uniq
echo -en "\n"

# Clean up all intermediate files (:? aborts if tmpdir is unset/empty).
rm -f "${tmpdir:?}/.depdetect_centos_line"
rm -f "${tmpdir:?}/.depdetect_centos_list"
rm -f "${tmpdir:?}/.depdetect_centos_list_uniq"
rm -f "${tmpdir:?}/.depdetect_debian_line"
rm -f "${tmpdir:?}/.depdetect_debian_list"
rm -f "${tmpdir:?}/.depdetect_debian_list_uniq"
rm -f "${tmpdir:?}/.depdetect_ubuntu_line"
rm -f "${tmpdir:?}/.depdetect_ubuntu_list"
rm -f "${tmpdir:?}/.depdetect_ubuntu_list_uniq"
rm -f "${tmpdir:?}/.depdetect_readelf"
rm -f "${tmpdir:?}/.depdetect_readelf_uniq"
rm -f "${tmpdir:?}/.depdetect_unknown"
rm -f "${tmpdir:?}/.depdetect_unknown_uniq"

core_exit.sh
|
dgibbs64/linuxgsm
|
lgsm/functions/command_dev_detect_deps.sh
|
Shell
|
mit
| 9,284 |
autoload colors && colors
# cheers, @ehrenmurdick
# http://github.com/ehrenmurdick/config/blob/master/zsh/prompt.zsh
git_branch() {
echo $(/usr/bin/git symbolic-ref HEAD 2>/dev/null | awk -F/ {'print $NF'})
}
git_dirty() {
if $(! /usr/bin/git status -s &> /dev/null)
then
echo ""
else
if [[ $(/usr/bin/git status --porcelain) == "" ]]
then
echo "on %{$fg_bold[green]%}$(git_prompt_info)%{$reset_color%}"
else
echo "on %{$fg_bold[red]%}$(git_prompt_info)%{$reset_color%}"
fi
fi
}
git_prompt_info () {
ref=$(/usr/bin/git symbolic-ref HEAD 2>/dev/null) || return
# echo "(%{\e[0;33m%}${ref#refs/heads/}%{\e[0m%})"
echo "${ref#refs/heads/}"
}
unpushed () {
/usr/bin/git cherry -v @{upstream} 2>/dev/null
}
need_push () {
if [[ $(unpushed) == "" ]]
then
echo " "
else
echo " with %{$fg_bold[magenta]%}unpushed%{$reset_color%} "
fi
}
rvm_prompt(){
if $(which rvm &> /dev/null)
then
echo "%{$fg_bold[yellow]%}$(rvm tools identifier)%{$reset_color%}"
else
echo ""
fi
}
username_prompt(){
echo "%{$fg_bold[yellow]%}%n%{$reset_color%}"
}
# This keeps the number of todos always available the right hand side of my
# command line. I filter it to only count those tagged as "+next", so it's more
# of a motivation to clear out the list.
# Prints nothing when todo.sh is missing or the count is zero.
todo(){
  if ! which todo.sh &> /dev/null
  then
    echo ""
    return
  fi
  num=$(echo $(todo.sh ls +next | wc -l))
  # The listing includes 2 non-item lines (presumably a footer) — subtract
  # them to get the item count. TODO confirm against todo.sh output format.
  let todos=num-2
  if [ $todos != 0 ]
  then
    echo "$todos"
  else
    echo ""
  fi
}
# Current directory (last path component) in bold cyan.
directory_name(){
echo "%{$fg_bold[cyan]%}%1/%\/%{$reset_color%}"
}
# Two-line prompt. The $'...' quoting keeps the $(...) calls literal, so
# zsh re-evaluates them at each prompt — presumably PROMPT_SUBST is
# enabled elsewhere in these dotfiles; confirm.
export PROMPT=$'\n$(username_prompt) in $(directory_name) $(git_dirty)$(need_push)\n› '
# Right-hand prompt: outstanding "+next" todo count in bold cyan.
set_prompt () {
export RPROMPT="%{$fg_bold[cyan]%}$(todo)%{$reset_color%}"
}
# zsh hook run before every prompt: refresh the terminal title and RPROMPT.
# NOTE(review): `title` is defined elsewhere in these dotfiles — confirm it
# is sourced before this file.
precmd() {
title "zsh" "%m" "%55<...<%~"
set_prompt
}
|
Crunch09/dotfiles
|
zsh/prompt.zsh
|
Shell
|
mit
| 1,832 |
#!/bin/bash
# Scaffold a new bash script from a standard template. The generated file
# gets commented-out "source" lines for every entry in FUNCTIONFILES, the
# shared footer from SCRIPTFOOTER, getopts boilerplate, and commented-out
# process-locking hooks for crontab use.
FUNCTIONFILES="/opt/bag-of-bash/bash-template/function-files.list"
SCRIPTFOOTER="/opt/bag-of-bash/bash-template/bash.footer"
# Target path comes from $1, or is prompted for interactively.
SCRIPT="${1}"
if [ -z "${1}" ]; then
read -p "Enter new script name:" SCRIPT
fi
# Refuse to clobber an existing file.
if [ ! -f "${SCRIPT}" ]; then
echo "INFO: Creating ${SCRIPT}" 1>&2
# Unquoted HERE delimiter: the $( ... ) blocks below run NOW, at template
# generation time, while \$-escaped variables survive verbatim into the
# generated script.
cat > "${SCRIPT}" <<HERE
#!/bin/bash
# PURPOSE:
# DEPENDENCIES:
# DELIVERABLES:
#====Begin function sourcing==========================================
$(if [ -f "${FUNCTIONFILES:-/dev/null/null}" ]; then
cat "${FUNCTIONFILES}" | while read FLINE; do
echo "#. ${FLINE}"
done
else
echo "#"
fi)
#====End function sourcing============================================
#====Begin Configuration variables====================================
# Note: The values may be changed by command line args below.
#
#====End configuration variables======================================
#====Begin internal function definitions==============================
function Usage () {
cat <<EOF
USAGE: ./\${0##*/}
-h Print this helpful message
EOF
}
#====End internal function definitions================================
# To add options: add the character to getopts and add the option to the case statement.
# Options with an argument must have a : following them in getopts. The value is stored in OPTARG
# The lone : at the start of the getopts suppresses verbose error messages from getopts.
#====Begin command line argument parsing==============================
# ---Test variables set by arguments in unlocked tests---
while getopts ":h" Option; do
case \${Option} in
# Options are terminated with ;;
h ) Usage 1>&2
exit 0;;
# This * ) option must remain last. Any options past this will be ignored.
* ) echo "ERROR: Unexpected argument: \${OPTARG}" 1>&2
Usage 1>&2
exit 1;;
esac
done
#====End command line argument parsing================================
#====Begin unlocked tests (exit here will not leave script locked)====
#
#====End unlocked tests===============================================
#~~~~Lock automated script---REQUIRED for crontabs~~~~
# Latest version: https://github.com/secure411dotorg/process-locking.git
#HASHARGS4LOCK="true" #prevent same script with the same args from running concurrently
#HASHARGS4LOCK="false" #prevent running concurrently without regard for args
#. /opt/process-locking/process-locking-header.sh
#====Begin locked tests===============================================
#
#====End locked tests and begin execution=============================
$(if [ -f "${SCRIPTFOOTER:-/dev/null/null}" ]; then
cat "${SCRIPTFOOTER}"
echo
fi)
#~~~~Unlock automated script---REQUIRED for crontabs~~~~
#. /opt/process-locking/process-locking-footer.sh
HERE
chmod +x "${SCRIPT}"
else
echo "ERROR: ${SCRIPT} already exists." 1>&2
fi
|
ZLightning/bag-of-bash
|
bash-template/new-bash-script.sh
|
Shell
|
mit
| 2,794 |
#!/bin/bash
# Install the Python 2 scientific stack (apt packages) plus
# scikit-learn/luminol (pip) for the DoS-detection tooling.
sudo apt-get update
sudo apt-get install -f
sudo apt-get install python-setuptools python-numpy python-scipy python-matplotlib python-pip -y
# Bug fix: pip has no "-y" option (that is an apt-get flag); passing it made
# pip abort with "no such option: -y", so nothing was installed.
sudo pip install numpy scipy matplotlib scikit-learn luminol
|
alonecoder1337/Dos-Attack-Detection-using-Machine-Learning
|
install.sh
|
Shell
|
mit
| 218 |
# Shell aliases (oh-my-zsh custom): navigation shortcuts, HTTP helpers,
# network info, OS X fallbacks, and GNU-vs-BSD `ls` color detection.
OSTYPE=`uname`
# Easier navigation: .., ..., ...., ....., ~ and -
alias ..2='cd ../..'
alias ..3='cd ../../..'
alias ..4='cd ../../../..'
# One of @janmoesen’s ProTip™s
for method in GET HEAD POST PUT DELETE TRACE OPTIONS; do
alias "$method"="lwp-request -m '$method'"
done
# Detect which `ls` flavor is in use
# For `ls` color setting go check theme/richchou.theme
if ls --color > /dev/null 2>&1; then # GNU `ls`
colorflag="--color"
else # OS X `ls`
colorflag="-G"
fi
alias ls="ls ${colorflag}"
# IP addresses
alias ip="dig +short myip.opendns.com @resolver1.opendns.com"
alias localip="ipconfig getifaddr en0"
alias ips="ifconfig -a | grep -o 'inet6\? \(addr:\)\?\s\?\(\(\([0-9]\+\.\)\{3\}[0-9]\+\)\|[a-fA-F0-9:]\+\)' | awk '{ sub(/inet6? (addr:)? ?/, \"\"); print }'"
# Get week number
alias week='date +%V'
# Stopwatch
alias timer='echo "Timer started. Stop with Ctrl-D." && date && time cat && date'
# View HTTP traffic
alias sniff="sudo ngrep -d 'en1' -t '^(GET|POST) ' 'tcp and port 80'"
alias httpdump="sudo tcpdump -i en1 -n -s 0 -w - | grep -a -o -E \"Host\: .*|GET \/.*\""
# Canonical hex dump; some systems have this symlinked
command -v hd > /dev/null || alias hd="hexdump -C"
# OS X has no `md5sum`, so use `md5` as a fallback
command -v md5sum > /dev/null || alias md5sum="md5"
# OS X has no `sha1sum`, so use `shasum` as a fallback
command -v sha1sum > /dev/null || alias sha1sum="shasum"
# Empty the Trash on all mounted volumes and the main HDD.
# Also, clear Apple’s System Logs to improve shell startup speed.
# Finally, clear download history from quarantine. https://mths.be/bum
alias emptytrash="sudo rm -rfv /Volumes/*/.Trashes; sudo rm -rfv ~/.Trash; sudo rm -rfv /private/var/log/asl/*.asl; sqlite3 ~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV* 'delete from LSQuarantineEvent'"
# URL-encode strings
alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1]);"'
# Make Grunt print stack traces by default
command -v grunt > /dev/null && alias grunt="grunt --stack"
# Stuff I never really use but cannot delete either because of http://xkcd.com/530/
#alias stfu="osascript -e 'set volume output muted true'"
#alias pumpitup="osascript -e 'set volume 7'"
# Fix: quote $OSTYPE — an unquoted empty or multi-word expansion would hand
# `[` a malformed expression instead of a string comparison.
if [ "$OSTYPE" = "Darwin" ]; then
alias afk="/System/Library/CoreServices/Menu\ Extras/User.menu/Contents/Resources/CGSession -suspend"
fi
# Reload the shell (i.e. invoke as a login shell)
alias reload="exec $SHELL -l"
unset OSTYPE
|
weitingchou/dotfiles
|
init/oh-my-zsh/custom/aliases.zsh
|
Shell
|
mit
| 2,495 |
#!/bin/bash
# Launch one simulation replicate: IGC geometry parameter 100.0, replicate 41.
# NOTE(review): assumes RunSimulation.py is in the current working directory.
python RunSimulation.py --Geo 100.0 --sim_num 41
|
xji3/IGCCodonSimulation
|
ShFiles/YDR418W_YEL054C_IGCgeo_100.0_sim_41.sh
|
Shell
|
mit
| 61 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3060-1
#
# Security announcement date: 2016-08-10 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:34 UTC
#
# Operating System: Ubuntu 16.04 LTS
# Architecture: i386
#
# Vulnerable packages fix on version:
# - libgd3:2.1.1-4ubuntu0.16.04.3
#
# Last versions recommanded by security team:
# - libgd3:2.1.1-4ubuntu0.16.04.5
#
# CVE List:
# - CVE-2016-6132
# - CVE-2016-6214
# - CVE-2016-6207
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Pin the exact patched build; --only-upgrade leaves libgd3 alone if it
# is not already installed.
sudo apt-get install --only-upgrade libgd3=2.1.1-4ubuntu0.16.04.5 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_16.04_LTS/i386/2016/USN-3060-1.sh
|
Shell
|
mit
| 672 |
# Show the systemd journal for the status-io unit (-u), newest entries
# first (-r).
sudo journalctl -ru status-io
|
coderaiser/status-io
|
service/log.sh
|
Shell
|
mit
| 31 |
# Default editor/pager environment for interactive shells.
export EDITOR=vim
export VISUAL=vim
export PAGER=less

# Fall back to a UTF-8 locale when the environment supplies none.
[[ -z "$LANG" ]] && export LANG='en_US.UTF-8'

# GPG needs the controlling terminal to be able to prompt for passphrases.
export GPG_TTY=$(tty)
|
openfirmware/dotfiles
|
user/env.zsh
|
Shell
|
mit
| 187 |
#!/bin/sh
# Copyright (C) 1999-2004 Hallvar Helleseth ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies of the Software, its documentation and marketing & publicity
# materials, and acknowledgment shall be given in the documentation, materials
# and software packages that this Software was used.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Author: Hallvar Helleseth ([email protected]) - Not that I'm proud ;)
# Instructions: move the top window and see what happends to the lower one.
#
# eesh is the IPC command client for the e16 window manager; each call
# below sends it one command. usleep pauses are 0.1 s each.
# Open dialog 1
eesh -e "dialog_ok Move this window"
usleep 100000
eesh -e "wop Message title Dlg1"
usleep 100000
# Open dialog 2
eesh -e "dialog_ok Watch me follow the above window"
usleep 100000
eesh -e "wop Message title Dlg2"
usleep 100000
# In one endless loop, get window position of the first window, then move the
# second one accordingly...
while true
do
# Get position
# The reply's 3rd and 4th whitespace-separated words are kept as the
# window's x and y coordinates (counter `a` picks them out).
pos=`eesh wop Dlg1 move "?"`
a=0
for i in $pos;do
a=$(($a + 1))
if [ $a = 3 ];then
xpos=$i
fi
if [ $a = 4 ];then
ypos=$i
fi
done
# Move the second window to the new position
# 74 px vertical offset keeps Dlg2 just below Dlg1 — presumably the
# dialog's height; confirm against the theme in use.
eesh wop Dlg2 move $xpos $(($ypos + 74))
done
|
burzumishi/e16
|
sample-scripts/lcdmover.sh
|
Shell
|
mit
| 2,043 |
#!/bin/bash
# Run the PML-to-Promela translator over every test case under a directory
# tree and diff its output against checked-in *.pml.expected files.
usage="Usage: $0 <path-to-binary> <path-to-input-test-dir>"
# ANSI color codes for console PASS/FAIL markers.
default="\e[0m"
green="\e[0;32m"
red="\e[0;31m"
msg_fail="FAIL"
msg_pass="PASS"
msg_fail_con=$red$msg_fail$default
msg_pass_con=$green$msg_pass$default
TEST_STATUS=0
# Check number of parameters
if [[ "$#" -ne 2 ]]; then
echo $usage
exit 1
fi
# Check first parameter is the path to an executable file
translator=$1
if ! [[ -x $translator ]] || [[ -d $translator ]]; then
echo "$usage: given path to binary is not an executable file."
exit 1
fi
transdir=(`dirname $translator`) # Directory where temporary artefacts to be placed
# NOTE(review): transdir is assigned above but never used in this script.
# Check the second parameter is the path to a directory
suite=$2
if ! [[ -d $suite ]]; then
echo "$usage: given path to input tests is not a directory."
exit 1
fi
# Set up logging
# The (`...`) forms create one-element arrays; later unquoted uses read
# element 0.
ts=(`date "+%F-%H-%M-%S"`)
scriptdir=(`dirname ${BASH_SOURCE[0]}`)
logdir="$scriptdir/logs-translator"
mkdir -p $logdir
logfile="$logdir/$ts.log"
# Empty predicate file handed to the translator as its third argument.
predemptyfile="$scriptdir/pred.promela"
touch $predemptyfile
touch $logfile
echo ""
echo "-------------------------------------------------------"
echo " PML-TO-PROMELA TRANSLATOR TESTS"
echo "-------------------------------------------------------"
# Set up counts
count_total=0
count_failed=0
for dir in $2/*/
do
echo -e "[--- $dir ---]" >> $logfile
for filepath in $dir/*.pml
do
# Get all necessary file paths
pml_filename=(`basename $filepath`)
echo -e "PML file found: $pml_filename" >> $logfile
basename="${pml_filename%.*}"
expected_filepath="$dir$basename.pml.expected"
if ! [[ -f $expected_filepath ]]; then
echo -e "Error: expected promela file \"$expected_filepath\" not found, skipping.\n" >> $logfile
continue
fi
expected_filename=(`basename $expected_filepath`)
echo -e "Expected promela file found: $expected_filename" >> $logfile
count_total=$((count_total+1))
actual_filename="$scriptdir/$basename.pml.actual"
# Run program (convert PML to promela)
com="./$translator $dir$pml_filename $actual_filename $predemptyfile"
echo -e "Running test: $basename... " >> $logfile
echo -n "Running test: $basename... "
echo -e "-------" >> $logfile
$com >> $logfile
echo -e "-------" >> $logfile
# Translator fails to create promela file
if [[ ! -f $actual_filename ]]; then
echo -e "Error: no promela file created"
count_failed=$((count_failed+1))
echo $msg_fail >> $logfile
echo -e $msg_fail_con
continue
fi
# Compare output
result=(`diff -q -bBZ $expected_filepath $actual_filename`)
if [[ "$result" != "" ]]; then
echo -e "*** Expected and actual promela files differ ***" >> $logfile
count_failed=$((count_failed+1))
echo $msg_fail >> $logfile
echo -e $msg_fail_con
else
echo $msg_pass >> $logfile
echo -e $msg_pass_con
fi
# Clean up artefacts
if [[ -f $actual_filename ]]; then
rm $actual_filename
fi
echo >> $logfile
done
done
# Clean dummy predicate file
if [[ -f $predemptyfile ]]; then
rm $predemptyfile
fi
# Print summary to file and console
count_succeeded=$(($count_total-$count_failed))
summary="\nRESULTS: Total: $count_total, Failures: $count_failed"
echo -e $summary >> $logfile
echo -e $summary
echo -e "Log available at: $logfile\n"
# Exit status equals the number of failed tests, so any non-zero value
# fails CI.
exit $count_failed
|
CS4098/GroupProject
|
test-suite/runner-translator.sh
|
Shell
|
mit
| 3,258 |
# Build the static library: compile every C file in the current directory
# to objects, then archive them into libft.a (r: insert/replace members,
# c: create the archive if missing).
gcc -c *.c
ar rc libft.a *.o
|
ayip001/42
|
piscine/d06/ex00/libft_creator.sh
|
Shell
|
mit
| 29 |
#!/bin/bash
# The MIT License (MIT)
#
# Copyright (c) 2015 Southern Nazarene University Computer Science/Network Engineering Department
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ******************************************************************************
# Ch. 03 - The Utilities
# Prepare some files and directories
# NOTE: This assumes that the utilities-lecture-files.tar file is located in
# your home directory.
# NOTE(review): this is a lecture walk-through meant to be read or stepped
# through; several commands below (man, info, more, less) open an
# interactive pager and would block a non-interactive end-to-end run.
cd ~
mkdir utilities-lecture
cd utilities-lecture
tar xf ../utilities-lecture-files.tar
# ==============================================================================
# How to Get Help
# Get help for the 'grep' command
grep --help
# Look at the man (manual) page for 'grep'
man grep
# Get information about 'grep'
info grep
# ==============================================================================
# Basic Utilities
# List the names of files
ls /usr/bin
# Dump the contents of a file to the screen
cat /etc/passwd
# Delete a file
# Note: The 'touch' command can create a blank file if the file doesn't exist
touch delete-me.txt
rm delete-me.txt
# Display a text file one screen at a time
more /etc/passwd
# Display a text file one screen at a time with the ability to go backwards
less /etc/passwd
# ==============================================================================
# Working with Files
# Copy a file
cp dir1/temp.txt dir2
# Move a file
mv dir1/temp.txt dir3/different.txt
# Search through files
grep home /etc/passwd
# Display the top 6 lines of a file
head -n 6 /etc/passwd
# Display the last 6 lines of a file
tail -n 6 /etc/passwd
# Display the sorted contents of a file
sort -r names.txt
# Display the contents of a file, skipping adjacent duplicate lines
# Note: Compare this output to 'cat names.txt'
uniq names.txt
# Compare 2 files and display differences
diff file1.txt file2.txt
# Test the contents of a given file
# ASCII text
file /etc/passwd
# Executable
file /bin/bash
# Directory
file ~
# ==============================================================================
# Piping
# Search for the word 'home' in /etc/passwd,
# remove everything but the 6th column using colons as column separators,
# list the contents of those directories, and
# display it to the screen
grep home /etc/passwd | awk -F ':' '{print $6;}' | ls -l | less
# Sort the names file and then remove the duplicates
sort names.txt | uniq
# ==============================================================================
# Miscellaneous Utilities
# Display a line of text
echo Greetings program
# Display the current date and time
date
# Display just the day of the week and the date
date "+%A %m-%d-%Y"
# Convert a DOS formatted file to Unix
dos2unix -n dos-file.txt dos-2-unix.txt
# Convert a Unix formatted file to DOS
unix2dos -n unix-file.txt unix-2-dos.txt
# Convert a Unix formatted file to Mac
unix2mac -n unix-file.txt unix-2-mac.txt
# ==============================================================================
# Compressing & Archiving Files
# Compress a file using the bzip compression algorithm
bzip2 uncompressed1.txt
# Decompress a file using the bzip compression algorithm
bunzip2 compressed1.txt.bz2
# Compress a file using the gzip compression algorithm
gzip uncompressed2.txt
# Decompress a file using the gzip compression algorithm
gunzip compressed2.txt.gz
# Combine all the files in the home directory into a single file and compress
# it using the gzip algorithm
tar cvfz /tmp/all-home-files.tar.gz ~/*
# ==============================================================================
# Locating Commands
# Locate where in the search path a utility is
which dos2unix
# Searches for a utility in standard locations, not just the search path
whereis dos2unix
# Searches for utilities related to a keyword
apropos password
# Searches for files on the local system with the given name
# NOTE(review): the search command is usually 'locate' ('mlocate' is the
# package/implementation name) — confirm on the target distro.
mlocate dos2unix
snucsne/CSNE-Course-Source-Code
|
CSNE3673-Net-Admin-I/video-examples/utilities.sh
|
Shell
|
mit
| 4,947 |
#!/bin/bash
# Project Euler #1: sum of all natural numbers below 1000 that are
# multiples of 3 or 5 (prints 233168).
#
# The original piped every number through `grep -E "*[05]$"` — an ERE with
# a leading `*`, which has undefined/never-matching behaviour on digits —
# and then through `factor`, spawning roughly two processes per number.
# Plain modular arithmetic is correct, dependency-free and far faster.
declare -i sum=0
for i in {1..999}; do
if (( i % 3 == 0 || i % 5 == 0 )); then
sum=$(( sum + i ))
fi
done
echo "$sum"
|
HackspaceJena/project-euler
|
e0001/euler0001.bash
|
Shell
|
mit
| 211 |
# Run the kb_util_dylan async job runner with the token staged by the
# KBase execution engine.
#
# Expects /kb/module/work to contain:
#   token       - auth token (appended by xargs as the runner's final arg)
#   input.json  - job input
#   output.json - job output target
script_dir=$(dirname "$(readlink -f "$0")")
export KB_DEPLOYMENT_CONFIG="$script_dir/../deploy.cfg"
WD=/kb/module/work
if [ -f "$WD/token" ]; then
# Read the token file directly (no useless `cat`); quote all paths.
xargs sh "$script_dir/../bin/run_kb_util_dylan_async_job.sh" "$WD/input.json" "$WD/output.json" < "$WD/token"
else
# Diagnostics go to stderr so callers capturing stdout see only job output.
echo "File $WD/token doesn't exist, aborting." >&2
exit 1
fi
|
dcchivian/kb_util_dylan
|
scripts/run_async.sh
|
Shell
|
mit
| 323 |
#!/bin/sh
#set -x
#set -e
# Show current interfaces so the operator can eyeball the USB NIC.
ifconfig
#echo "Enter device name > "
#read device
# Pick the USB-Ethernet gadget interface (predictable interface names for
# USB NICs start with "enx"). NOTE(review): parses `ls` output; breaks if
# more than one enx* interface exists — confirm a single gadget is attached.
export device=$(ls /sys/class/net | grep -E 'enx')
# bring the USB virtual Ethernet interface up
#sudo /sbin/ip link set usb0 up
sudo /sbin/ip link set $device up
# set the host IP address
#sudo /sbin/ip addr add 10.0.0.2/24 dev usb0
sudo /sbin/ip addr add 10.0.0.2/24 dev $device
# enable masquerading for outgoing connections towards wireless interface
# (enp0s3 is assumed to be the host's uplink interface — confirm)
sudo /sbin/iptables -t nat -A POSTROUTING -s 10.0.0.1/32 -o enp0s3 -j MASQUERADE
# enable IP forwarding
echo "1" | sudo tee /proc/sys/net/ipv4/ip_forward
# NOTE(review): ssh does not accept user:password@host syntax — this line
# likely fails; use `ssh [email protected]` with keys, or sshpass.
ssh usbarmory:[email protected]
#ssh root:[email protected]
# TODO: adjust the password!!!!
|
daneflash/badusb
|
Host-Content/connectionSetup.sh
|
Shell
|
mit
| 679 |
#!/bin/sh
#set -x
# Pre-process one uploaded photo for the Mosaic "right grid": normalise it
# to a 640x640 PNG, render two polaroid-style cards (one for the grid, one
# for the interaction view), then archive the original.
#
# Args: 1=input image  2=tile width  3=tile height  4=grid output dir
#       5=raw mosaic dir  6=polaroid dir  7=username drawn on the card
#
# NOTE(review): the "$(unknown)" fragments below invoke a command named
# `unknown`, which does not exist — they look like redacted/lost filename
# expressions (probably "${filename}"); confirm against the original repo
# before running.
d=${MOSAIC_BINDIR} # MOSAIC_BINDIR can be set in /etc/launchd.conf when using launch agents.
# Fall back to the current (or parent) directory when MOSAIC_BINDIR is unset.
if [ -z "${d}" ] ; then
if [ -f ${PWD}/AppGridLeft ] ; then
d=${PWD}
else
d=${PWD}/../
fi
fi
bindir=${d} # see /etc/launchd.conf and http://www.dowdandassociates.com/blog/content/howto-set-an-environment-variable-in-mac-os-x-slash-etc-slash-launchd-dot-conf/
magickdir=${d}/../imagemagick/
infile=${1}
tile_width=${2}
tile_height=${3}
output_dir=${4}
raw_mosaic_dir=${5}
polaroid_dir=${6}
username=${7}
# Append a timestamped line to the preprocess log.
function log {
logfile=${bindir}/data/log/preprocess_right.log
# NOTE(review): this tests for a file literally named "logfile" in the CWD
# instead of "${logfile}" — almost certainly a typo (harmless, since >>
# creates the log anyway).
if [ ! -f logfile ] ; then
touch ${logfile}
fi
dat=$(date +%Y.%m.%d.%H.%M.%S)
echo "${dat}: ${1}" >> ${logfile}
}
# Make sure the file exists.
if [ ! -f ${infile} ] ; then
log "Cannot find ${infile}"
exit
fi
filename=$(basename "$infile")
extension="${filename##*.}"
filename="${filename%.*}"
tmp_filename="${d}/tmp_$(unknown).png"
# NOTE(review): "${d}png_..." has no "/" between directory and file name;
# this only works when d ends with a slash (the ${PWD}/../ fallback) —
# confirm MOSAIC_BINDIR is set with a trailing slash.
png_filename="${d}png_$(unknown).png"
grid_filepath=${output_dir}/$(unknown).png
# Convert to png and make sure the input image is 640x640 because the positioning depends on these dimensions
${magickdir}/convert ${infile} \
-resize 640x640^ \
-gravity center \
${png_filename}
# Small Polaroid for left grid.
${bindir}/AppPolaroid \
-x 12 -y 60 -f ${png_filename} \
-r 0.0 -g 0.0 -b 0.0 \
-n "${username} " -s 35 -t 21 -w 11 \
-h "#TOPSHOPWINDOW" -i 32 -j 211 \
-a 180 -c ${bindir}/data/assets/polaroid_overlay_small.png \
-o ${tmp_filename}
cp ${tmp_filename} ${grid_filepath}
# Small polaroid for interaction
${bindir}/AppPolaroid \
-x 130 -y 115 -f ${png_filename} \
-r 0.0 -g 0.0 -b 0.0 \
-n "${username}" -s 66 -t 37 -w 11 \
-h "#TOPSHOPWINDOW" -i 63 -j 226 \
-a 180 -c ${bindir}/data/assets/polaroid_overlay_small_for_interaction.png \
-o ${tmp_filename}
cp ${tmp_filename} ${polaroid_dir}/$(unknown).png
# Move to mosaic dir
mv ${infile} ${raw_mosaic_dir}/$(unknown).${extension}
rm ${png_filename}
rm ${tmp_filename}
|
HellicarAndLewis/Mosaic
|
install/mac-clang-x86_64/bin/scripts/preprocess_right_grid.sh
|
Shell
|
mit
| 2,078 |
#!/bin/bash
# Font Awesome (version 4.7.0, 675 icons, 111 aliases)
# Codepoints: F000-F2E0 with holes
test -n "$__i_fa_loaded" && return || __i_fa_loaded=1
i='' i_fa_glass=$i
i='' i_fa_music=$i
i='' i_fa_search=$i
i='' i_fa_envelope_o=$i
i='' i_fa_heart=$i
i='' i_fa_star=$i
i='' i_fa_star_o=$i
i='' i_fa_user=$i
i='' i_fa_film=$i
i='' i_fa_th_large=$i
i='' i_fa_th=$i
i='' i_fa_th_list=$i
i='' i_fa_check=$i
i='' i_fa_times=$i
i_fa_close=$i_fa_times
i_fa_remove=$i_fa_times
i='' i_fa_search_plus=$i
i='' i_fa_search_minus=$i
i='' i_fa_power_off=$i
i='' i_fa_signal=$i
i='' i_fa_cog=$i
i_fa_gear=$i_fa_cog
i='' i_fa_trash_o=$i
i='' i_fa_home=$i
i='' i_fa_file_o=$i
i='' i_fa_clock_o=$i
i='' i_fa_road=$i
i='' i_fa_download=$i
i='' i_fa_arrow_circle_o_down=$i
i='' i_fa_arrow_circle_o_up=$i
i='' i_fa_inbox=$i
i='' i_fa_play_circle_o=$i
i='' i_fa_repeat=$i
i_fa_rotate_right=$i_fa_repeat
i='' i_fa_refresh=$i
i='' i_fa_list_alt=$i
i='' i_fa_lock=$i
i='' i_fa_flag=$i
i='' i_fa_headphones=$i
i='' i_fa_volume_off=$i
i='' i_fa_volume_down=$i
i='' i_fa_volume_up=$i
i='' i_fa_qrcode=$i
i='' i_fa_barcode=$i
i='' i_fa_tag=$i
i='' i_fa_tags=$i
i='' i_fa_book=$i
i='' i_fa_bookmark=$i
i='' i_fa_print=$i
i='' i_fa_camera=$i
i='' i_fa_font=$i
i='' i_fa_bold=$i
i='' i_fa_italic=$i
i='' i_fa_text_height=$i
i='' i_fa_text_width=$i
i='' i_fa_align_left=$i
i='' i_fa_align_center=$i
i='' i_fa_align_right=$i
i='' i_fa_align_justify=$i
i='' i_fa_list=$i
i='' i_fa_outdent=$i
i_fa_dedent=$i_fa_outdent
i='' i_fa_indent=$i
i='' i_fa_video_camera=$i
i='' i_fa_picture_o=$i
i_fa_image=$i_fa_picture_o
i_fa_photo=$i_fa_picture_o
i='' i_fa_pencil=$i
i='' i_fa_map_marker=$i
i='' i_fa_adjust=$i
i='' i_fa_tint=$i
i='' i_fa_pencil_square_o=$i
i_fa_edit=$i_fa_pencil_square_o
i='' i_fa_share_square_o=$i
i='' i_fa_check_square_o=$i
i='' i_fa_arrows=$i
i='' i_fa_step_backward=$i
i='' i_fa_fast_backward=$i
i='' i_fa_backward=$i
i='' i_fa_play=$i
i='' i_fa_pause=$i
i='' i_fa_stop=$i
i='' i_fa_forward=$i
i='' i_fa_fast_forward=$i
i='' i_fa_step_forward=$i
i='' i_fa_eject=$i
i='' i_fa_chevron_left=$i
i='' i_fa_chevron_right=$i
i='' i_fa_plus_circle=$i
i='' i_fa_minus_circle=$i
i='' i_fa_times_circle=$i
i='' i_fa_check_circle=$i
i='' i_fa_question_circle=$i
i='' i_fa_info_circle=$i
i='' i_fa_crosshairs=$i
i='' i_fa_times_circle_o=$i
i='' i_fa_check_circle_o=$i
i='' i_fa_ban=$i
i='' i_fa_arrow_left=$i
i='' i_fa_arrow_right=$i
i='' i_fa_arrow_up=$i
i='' i_fa_arrow_down=$i
i='' i_fa_share=$i
i_fa_mail_forward=$i_fa_share
i='' i_fa_expand=$i
i='' i_fa_compress=$i
i='' i_fa_plus=$i
i='' i_fa_minus=$i
i='' i_fa_asterisk=$i
i='' i_fa_exclamation_circle=$i
i='' i_fa_gift=$i
i='' i_fa_leaf=$i
i='' i_fa_fire=$i
i='' i_fa_eye=$i
i='' i_fa_eye_slash=$i
i='' i_fa_exclamation_triangle=$i
i_fa_warning=$i_fa_exclamation_triangle
i='' i_fa_plane=$i
i='' i_fa_calendar=$i
i='' i_fa_random=$i
i='' i_fa_comment=$i
i='' i_fa_magnet=$i
i='' i_fa_chevron_up=$i
i='' i_fa_chevron_down=$i
i='' i_fa_retweet=$i
i='' i_fa_shopping_cart=$i
i='' i_fa_folder=$i
i='' i_fa_folder_open=$i
i='' i_fa_arrows_v=$i
i='' i_fa_arrows_h=$i
i='' i_fa_bar_chart=$i
i_fa_bar_chart_o=$i_fa_bar_chart
i='' i_fa_twitter_square=$i
i='' i_fa_facebook_square=$i
i='' i_fa_camera_retro=$i
i='' i_fa_key=$i
i='' i_fa_cogs=$i
i_fa_gears=$i_fa_cogs
i='' i_fa_comments=$i
i='' i_fa_thumbs_o_up=$i
i='' i_fa_thumbs_o_down=$i
i='' i_fa_star_half=$i
i='' i_fa_heart_o=$i
i='' i_fa_sign_out=$i
i='' i_fa_linkedin_square=$i
i='' i_fa_thumb_tack=$i
i='' i_fa_external_link=$i
i='' i_fa_sign_in=$i
i='' i_fa_trophy=$i
i='' i_fa_github_square=$i
i='' i_fa_upload=$i
i='' i_fa_lemon_o=$i
i='' i_fa_phone=$i
i='' i_fa_square_o=$i
i='' i_fa_bookmark_o=$i
i='' i_fa_phone_square=$i
i='' i_fa_twitter=$i
i='' i_fa_facebook=$i
i_fa_facebook_f=$i_fa_facebook
i='' i_fa_github=$i
i='' i_fa_unlock=$i
i='' i_fa_credit_card=$i
i='' i_fa_rss=$i
i_fa_feed=$i_fa_rss
i='' i_fa_hdd_o=$i
i='' i_fa_bullhorn=$i
i='' i_fa_bell_o=$i
i='' i_fa_certificate=$i
i='' i_fa_hand_o_right=$i
i='' i_fa_hand_o_left=$i
i='' i_fa_hand_o_up=$i
i='' i_fa_hand_o_down=$i
i='' i_fa_arrow_circle_left=$i
i='' i_fa_arrow_circle_right=$i
i='' i_fa_arrow_circle_up=$i
i='' i_fa_arrow_circle_down=$i
i='' i_fa_globe=$i
i='' i_fa_wrench=$i
i='' i_fa_tasks=$i
i='' i_fa_filter=$i
i='' i_fa_briefcase=$i
i='' i_fa_arrows_alt=$i
i='' i_fa_users=$i
i_fa_group=$i_fa_users
i='' i_fa_link=$i
i_fa_chain=$i_fa_link
i='' i_fa_cloud=$i
i='' i_fa_flask=$i
i='' i_fa_scissors=$i
i_fa_cut=$i_fa_scissors
i='' i_fa_files_o=$i
i_fa_copy=$i_fa_files_o
i='' i_fa_paperclip=$i
i='' i_fa_floppy_o=$i
i_fa_save=$i_fa_floppy_o
i='' i_fa_square=$i
i='' i_fa_bars=$i
i_fa_navicon=$i_fa_bars
i_fa_reorder=$i_fa_bars
i='' i_fa_list_ul=$i
i='' i_fa_list_ol=$i
i='' i_fa_strikethrough=$i
i='' i_fa_underline=$i
i='' i_fa_table=$i
i='' i_fa_magic=$i
i='' i_fa_truck=$i
i='' i_fa_pinterest=$i
i='' i_fa_pinterest_square=$i
i='' i_fa_google_plus_square=$i
i='' i_fa_google_plus=$i
i='' i_fa_money=$i
i='' i_fa_caret_down=$i
i='' i_fa_caret_up=$i
i='' i_fa_caret_left=$i
i='' i_fa_caret_right=$i
i='' i_fa_columns=$i
i='' i_fa_sort=$i
i_fa_unsorted=$i_fa_sort
i='' i_fa_sort_desc=$i
i_fa_sort_down=$i_fa_sort_desc
i='' i_fa_sort_asc=$i
i_fa_sort_up=$i_fa_sort_asc
i='' i_fa_envelope=$i
i='' i_fa_linkedin=$i
i='' i_fa_undo=$i
i_fa_rotate_left=$i_fa_undo
i='' i_fa_gavel=$i
i_fa_legal=$i_fa_gavel
i='' i_fa_tachometer=$i
i_fa_dashboard=$i_fa_tachometer
i='' i_fa_comment_o=$i
i='' i_fa_comments_o=$i
i='' i_fa_bolt=$i
i_fa_flash=$i_fa_bolt
i='' i_fa_sitemap=$i
i='' i_fa_umbrella=$i
i='' i_fa_clipboard=$i
i_fa_paste=$i_fa_clipboard
i='' i_fa_lightbulb_o=$i
i='' i_fa_exchange=$i
i='' i_fa_cloud_download=$i
i='' i_fa_cloud_upload=$i
i='' i_fa_user_md=$i
i='' i_fa_stethoscope=$i
i='' i_fa_suitcase=$i
i='' i_fa_bell=$i
i='' i_fa_coffee=$i
i='' i_fa_cutlery=$i
i='' i_fa_file_text_o=$i
i='' i_fa_building_o=$i
i='' i_fa_hospital_o=$i
i='' i_fa_ambulance=$i
i='' i_fa_medkit=$i
i='' i_fa_fighter_jet=$i
i='' i_fa_beer=$i
i='' i_fa_h_square=$i
i='' i_fa_plus_square=$i
i='' i_fa_angle_double_left=$i
i='' i_fa_angle_double_right=$i
i='' i_fa_angle_double_up=$i
i='' i_fa_angle_double_down=$i
i='' i_fa_angle_left=$i
i='' i_fa_angle_right=$i
i='' i_fa_angle_up=$i
i='' i_fa_angle_down=$i
i='' i_fa_desktop=$i
i='' i_fa_laptop=$i
i='' i_fa_tablet=$i
i='' i_fa_mobile=$i
i_fa_mobile_phone=$i_fa_mobile
i='' i_fa_circle_o=$i
i='' i_fa_quote_left=$i
i='' i_fa_quote_right=$i
i='' i_fa_spinner=$i
i='' i_fa_circle=$i
i='' i_fa_reply=$i
i_fa_mail_reply=$i_fa_reply
i='' i_fa_github_alt=$i
i='' i_fa_folder_o=$i
i='' i_fa_folder_open_o=$i
i='' i_fa_smile_o=$i
i='' i_fa_frown_o=$i
i='' i_fa_meh_o=$i
i='' i_fa_gamepad=$i
i='' i_fa_keyboard_o=$i
i='' i_fa_flag_o=$i
i='' i_fa_flag_checkered=$i
i='' i_fa_terminal=$i
i='' i_fa_code=$i
i='' i_fa_reply_all=$i
i_fa_mail_reply_all=$i_fa_reply_all
i='' i_fa_star_half_o=$i
i_fa_star_half_empty=$i_fa_star_half_o
i_fa_star_half_full=$i_fa_star_half_o
i='' i_fa_location_arrow=$i
i='' i_fa_crop=$i
i='' i_fa_code_fork=$i
i='' i_fa_chain_broken=$i
i_fa_unlink=$i_fa_chain_broken
i='' i_fa_question=$i
i='' i_fa_info=$i
i='' i_fa_exclamation=$i
i='' i_fa_superscript=$i
i='' i_fa_subscript=$i
i='' i_fa_eraser=$i
i='' i_fa_puzzle_piece=$i
i='' i_fa_microphone=$i
i='' i_fa_microphone_slash=$i
i='' i_fa_shield=$i
i='' i_fa_calendar_o=$i
i='' i_fa_fire_extinguisher=$i
i='' i_fa_rocket=$i
i='' i_fa_maxcdn=$i
i='' i_fa_chevron_circle_left=$i
i='' i_fa_chevron_circle_right=$i
i='' i_fa_chevron_circle_up=$i
i='' i_fa_chevron_circle_down=$i
i='' i_fa_html5=$i
i='' i_fa_css3=$i
i='' i_fa_anchor=$i
i='' i_fa_unlock_alt=$i
i='' i_fa_bullseye=$i
i='' i_fa_ellipsis_h=$i
i='' i_fa_ellipsis_v=$i
i='' i_fa_rss_square=$i
i='' i_fa_play_circle=$i
i='' i_fa_ticket=$i
i='' i_fa_minus_square=$i
i='' i_fa_minus_square_o=$i
i='' i_fa_level_up=$i
i='' i_fa_level_down=$i
i='' i_fa_check_square=$i
i='' i_fa_pencil_square=$i
i='' i_fa_external_link_square=$i
i='' i_fa_share_square=$i
i='' i_fa_compass=$i
i='' i_fa_caret_square_o_down=$i
i_fa_toggle_down=$i_fa_caret_square_o_down
i='' i_fa_caret_square_o_up=$i
i_fa_toggle_up=$i_fa_caret_square_o_up
i='' i_fa_caret_square_o_right=$i
i_fa_toggle_right=$i_fa_caret_square_o_right
i='' i_fa_eur=$i
i_fa_euro=$i_fa_eur
i='' i_fa_gbp=$i
i='' i_fa_usd=$i
i_fa_dollar=$i_fa_usd
i='' i_fa_inr=$i
i_fa_rupee=$i_fa_inr
i='' i_fa_jpy=$i
i_fa_cny=$i_fa_jpy
i_fa_rmb=$i_fa_jpy
i_fa_yen=$i_fa_jpy
i='' i_fa_rub=$i
i_fa_rouble=$i_fa_rub
i_fa_ruble=$i_fa_rub
i='' i_fa_krw=$i
i_fa_won=$i_fa_krw
i='' i_fa_btc=$i
i_fa_bitcoin=$i_fa_btc
i='' i_fa_file=$i
i='' i_fa_file_text=$i
i='' i_fa_sort_alpha_asc=$i
i='' i_fa_sort_alpha_desc=$i
i='' i_fa_sort_amount_asc=$i
i='' i_fa_sort_amount_desc=$i
i='' i_fa_sort_numeric_asc=$i
i='' i_fa_sort_numeric_desc=$i
i='' i_fa_thumbs_up=$i
i='' i_fa_thumbs_down=$i
i='' i_fa_youtube_square=$i
i='' i_fa_youtube=$i
i='' i_fa_xing=$i
i='' i_fa_xing_square=$i
i='' i_fa_youtube_play=$i
i='' i_fa_dropbox=$i
i='' i_fa_stack_overflow=$i
i='' i_fa_instagram=$i
i='' i_fa_flickr=$i
i='' i_fa_adn=$i
i='' i_fa_bitbucket=$i
i='' i_fa_bitbucket_square=$i
i='' i_fa_tumblr=$i
i='' i_fa_tumblr_square=$i
i='' i_fa_long_arrow_down=$i
i='' i_fa_long_arrow_up=$i
i='' i_fa_long_arrow_left=$i
i='' i_fa_long_arrow_right=$i
i='' i_fa_apple=$i
i='' i_fa_windows=$i
i='' i_fa_android=$i
i='' i_fa_linux=$i
i='' i_fa_dribbble=$i
i='' i_fa_skype=$i
i='' i_fa_foursquare=$i
i='' i_fa_trello=$i
i='' i_fa_female=$i
i='' i_fa_male=$i
i='' i_fa_gratipay=$i
i_fa_gittip=$i_fa_gratipay
i='' i_fa_sun_o=$i
i='' i_fa_moon_o=$i
i='' i_fa_archive=$i
i='' i_fa_bug=$i
i='' i_fa_vk=$i
i='' i_fa_weibo=$i
i='' i_fa_renren=$i
i='' i_fa_pagelines=$i
i='' i_fa_stack_exchange=$i
i='' i_fa_arrow_circle_o_right=$i
i='' i_fa_arrow_circle_o_left=$i
i='' i_fa_caret_square_o_left=$i
i_fa_toggle_left=$i_fa_caret_square_o_left
i='' i_fa_dot_circle_o=$i
i='' i_fa_wheelchair=$i
i='' i_fa_vimeo_square=$i
i='' i_fa_try=$i
i_fa_turkish_lira=$i_fa_try
i='' i_fa_plus_square_o=$i
i='' i_fa_space_shuttle=$i
i='' i_fa_slack=$i
i='' i_fa_envelope_square=$i
i='' i_fa_wordpress=$i
i='' i_fa_openid=$i
i='' i_fa_university=$i
i_fa_bank=$i_fa_university
i_fa_institution=$i_fa_university
i='' i_fa_graduation_cap=$i
i_fa_mortar_board=$i_fa_graduation_cap
i='' i_fa_yahoo=$i
i='' i_fa_google=$i
i='' i_fa_reddit=$i
i='' i_fa_reddit_square=$i
i='' i_fa_stumbleupon_circle=$i
i='' i_fa_stumbleupon=$i
i='' i_fa_delicious=$i
i='' i_fa_digg=$i
i='' i_fa_pied_piper_pp=$i
i='' i_fa_pied_piper_alt=$i
i='' i_fa_drupal=$i
i='' i_fa_joomla=$i
i='' i_fa_language=$i
i='' i_fa_fax=$i
i='' i_fa_building=$i
i='' i_fa_child=$i
i='' i_fa_paw=$i
i='' i_fa_spoon=$i
i='' i_fa_cube=$i
i='' i_fa_cubes=$i
i='' i_fa_behance=$i
i='' i_fa_behance_square=$i
i='' i_fa_steam=$i
i='' i_fa_steam_square=$i
i='' i_fa_recycle=$i
i='' i_fa_car=$i
i_fa_automobile=$i_fa_car
i='' i_fa_taxi=$i
i_fa_cab=$i_fa_taxi
i='' i_fa_tree=$i
i='' i_fa_spotify=$i
i='' i_fa_deviantart=$i
i='' i_fa_soundcloud=$i
i='' i_fa_database=$i
i='' i_fa_file_pdf_o=$i
i='' i_fa_file_word_o=$i
i='' i_fa_file_excel_o=$i
i='' i_fa_file_powerpoint_o=$i
i='' i_fa_file_image_o=$i
i_fa_file_photo_o=$i_fa_file_image_o
i_fa_file_picture_o=$i_fa_file_image_o
i='' i_fa_file_archive_o=$i
i_fa_file_zip_o=$i_fa_file_archive_o
i='' i_fa_file_audio_o=$i
i_fa_file_sound_o=$i_fa_file_audio_o
i='' i_fa_file_video_o=$i
i_fa_file_movie_o=$i_fa_file_video_o
i='' i_fa_file_code_o=$i
i='' i_fa_vine=$i
i='' i_fa_codepen=$i
i='' i_fa_jsfiddle=$i
i='' i_fa_life_ring=$i
i_fa_life_bouy=$i_fa_life_ring
i_fa_life_buoy=$i_fa_life_ring
i_fa_life_saver=$i_fa_life_ring
i_fa_support=$i_fa_life_ring
i='' i_fa_circle_o_notch=$i
i='' i_fa_rebel=$i
i_fa_ra=$i_fa_rebel
i_fa_resistance=$i_fa_rebel
i='' i_fa_empire=$i
i_fa_ge=$i_fa_empire
i='' i_fa_git_square=$i
i='' i_fa_git=$i
i='' i_fa_hacker_news=$i
i_fa_y_combinator_square=$i_fa_hacker_news
i_fa_yc_square=$i_fa_hacker_news
i='' i_fa_tencent_weibo=$i
i='' i_fa_qq=$i
i='' i_fa_weixin=$i
i_fa_wechat=$i_fa_weixin
i='' i_fa_paper_plane=$i
i_fa_send=$i_fa_paper_plane
i='' i_fa_paper_plane_o=$i
i_fa_send_o=$i_fa_paper_plane_o
i='' i_fa_history=$i
i='' i_fa_circle_thin=$i
i='' i_fa_header=$i
i='' i_fa_paragraph=$i
i='' i_fa_sliders=$i
i='' i_fa_share_alt=$i
i='' i_fa_share_alt_square=$i
i='' i_fa_bomb=$i
i='' i_fa_futbol_o=$i
i_fa_soccer_ball_o=$i_fa_futbol_o
i='' i_fa_tty=$i
i='' i_fa_binoculars=$i
i='' i_fa_plug=$i
i='' i_fa_slideshare=$i
i='' i_fa_twitch=$i
i='' i_fa_yelp=$i
i='' i_fa_newspaper_o=$i
i='' i_fa_wifi=$i
i='' i_fa_calculator=$i
i='' i_fa_paypal=$i
i='' i_fa_google_wallet=$i
i='' i_fa_cc_visa=$i
i='' i_fa_cc_mastercard=$i
i='' i_fa_cc_discover=$i
i='' i_fa_cc_amex=$i
i='' i_fa_cc_paypal=$i
i='' i_fa_cc_stripe=$i
i='' i_fa_bell_slash=$i
i='' i_fa_bell_slash_o=$i
i='' i_fa_trash=$i
i='' i_fa_copyright=$i
i='' i_fa_at=$i
i='' i_fa_eyedropper=$i
i='' i_fa_paint_brush=$i
i='' i_fa_birthday_cake=$i
i='' i_fa_area_chart=$i
i='' i_fa_pie_chart=$i
i='' i_fa_line_chart=$i
i='' i_fa_lastfm=$i
i='' i_fa_lastfm_square=$i
i='' i_fa_toggle_off=$i
i='' i_fa_toggle_on=$i
i='' i_fa_bicycle=$i
i='' i_fa_bus=$i
i='' i_fa_ioxhost=$i
i='' i_fa_angellist=$i
i='' i_fa_cc=$i
i='' i_fa_ils=$i
i_fa_shekel=$i_fa_ils
i_fa_sheqel=$i_fa_ils
i='' i_fa_meanpath=$i
i='' i_fa_buysellads=$i
i='' i_fa_connectdevelop=$i
i='' i_fa_dashcube=$i
i='' i_fa_forumbee=$i
i='' i_fa_leanpub=$i
i='' i_fa_sellsy=$i
i='' i_fa_shirtsinbulk=$i
i='' i_fa_simplybuilt=$i
i='' i_fa_skyatlas=$i
i='' i_fa_cart_plus=$i
i='' i_fa_cart_arrow_down=$i
i='' i_fa_diamond=$i
i='' i_fa_ship=$i
i='' i_fa_user_secret=$i
i='' i_fa_motorcycle=$i
i='' i_fa_street_view=$i
i='' i_fa_heartbeat=$i
i='' i_fa_venus=$i
i='' i_fa_mars=$i
i='' i_fa_mercury=$i
i='' i_fa_transgender=$i
i_fa_intersex=$i_fa_transgender
i='' i_fa_transgender_alt=$i
i='' i_fa_venus_double=$i
i='' i_fa_mars_double=$i
i='' i_fa_venus_mars=$i
i='' i_fa_mars_stroke=$i
i='' i_fa_mars_stroke_v=$i
i='' i_fa_mars_stroke_h=$i
i='' i_fa_neuter=$i
i='' i_fa_genderless=$i
i='' i_fa_facebook_official=$i
i='' i_fa_pinterest_p=$i
i='' i_fa_whatsapp=$i
i='' i_fa_server=$i
i='' i_fa_user_plus=$i
i='' i_fa_user_times=$i
i='' i_fa_bed=$i
i_fa_hotel=$i_fa_bed
i='' i_fa_viacoin=$i
i='' i_fa_train=$i
i='' i_fa_subway=$i
i='' i_fa_medium=$i
i='' i_fa_y_combinator=$i
i_fa_yc=$i_fa_y_combinator
i='' i_fa_optin_monster=$i
i='' i_fa_opencart=$i
i='' i_fa_expeditedssl=$i
i='' i_fa_battery_full=$i
i_fa_battery=$i_fa_battery_full
i_fa_battery_4=$i_fa_battery_full
i='' i_fa_battery_three_quarters=$i
i_fa_battery_3=$i_fa_battery_three_quarters
i='' i_fa_battery_half=$i
i_fa_battery_2=$i_fa_battery_half
i='' i_fa_battery_quarter=$i
i_fa_battery_1=$i_fa_battery_quarter
i='' i_fa_battery_empty=$i
i_fa_battery_0=$i_fa_battery_empty
i='' i_fa_mouse_pointer=$i
i='' i_fa_i_cursor=$i
i='' i_fa_object_group=$i
i='' i_fa_object_ungroup=$i
i='' i_fa_sticky_note=$i
i='' i_fa_sticky_note_o=$i
i='' i_fa_cc_jcb=$i
i='' i_fa_cc_diners_club=$i
i='' i_fa_clone=$i
i='' i_fa_balance_scale=$i
i='' i_fa_hourglass_o=$i
i='' i_fa_hourglass_start=$i
i_fa_hourglass_1=$i_fa_hourglass_start
i='' i_fa_hourglass_half=$i
i_fa_hourglass_2=$i_fa_hourglass_half
i='' i_fa_hourglass_end=$i
i_fa_hourglass_3=$i_fa_hourglass_end
i='' i_fa_hourglass=$i
i='' i_fa_hand_rock_o=$i
i_fa_hand_grab_o=$i_fa_hand_rock_o
i='' i_fa_hand_paper_o=$i
i_fa_hand_stop_o=$i_fa_hand_paper_o
i='' i_fa_hand_scissors_o=$i
i='' i_fa_hand_lizard_o=$i
i='' i_fa_hand_spock_o=$i
i='' i_fa_hand_pointer_o=$i
i='' i_fa_hand_peace_o=$i
i='' i_fa_trademark=$i
i='' i_fa_registered=$i
i='' i_fa_creative_commons=$i
i='' i_fa_gg=$i
i='' i_fa_gg_circle=$i
i='' i_fa_tripadvisor=$i
i='' i_fa_odnoklassniki=$i
i='' i_fa_odnoklassniki_square=$i
i='' i_fa_get_pocket=$i
i='' i_fa_wikipedia_w=$i
i='' i_fa_safari=$i
i='' i_fa_chrome=$i
i='' i_fa_firefox=$i
i='' i_fa_opera=$i
i='' i_fa_internet_explorer=$i
i='' i_fa_television=$i
i_fa_tv=$i_fa_television
i='' i_fa_contao=$i
i='' i_fa_500px=$i
i='' i_fa_amazon=$i
i='' i_fa_calendar_plus_o=$i
i='' i_fa_calendar_minus_o=$i
i='' i_fa_calendar_times_o=$i
i='' i_fa_calendar_check_o=$i
i='' i_fa_industry=$i
i='' i_fa_map_pin=$i
i='' i_fa_map_signs=$i
i='' i_fa_map_o=$i
i='' i_fa_map=$i
i='' i_fa_commenting=$i
i='' i_fa_commenting_o=$i
i='' i_fa_houzz=$i
i='' i_fa_vimeo=$i
i='' i_fa_black_tie=$i
i='' i_fa_fonticons=$i
i='' i_fa_reddit_alien=$i
i='' i_fa_edge=$i
i='' i_fa_credit_card_alt=$i
i='' i_fa_codiepie=$i
i='' i_fa_modx=$i
i='' i_fa_fort_awesome=$i
i='' i_fa_usb=$i
i='' i_fa_product_hunt=$i
i='' i_fa_mixcloud=$i
i='' i_fa_scribd=$i
i='' i_fa_pause_circle=$i
i='' i_fa_pause_circle_o=$i
i='' i_fa_stop_circle=$i
i='' i_fa_stop_circle_o=$i
i='' i_fa_shopping_bag=$i
i='' i_fa_shopping_basket=$i
i='' i_fa_hashtag=$i
i='' i_fa_bluetooth=$i
i='' i_fa_bluetooth_b=$i
i='' i_fa_percent=$i
i='' i_fa_gitlab=$i
i='' i_fa_wpbeginner=$i
i='' i_fa_wpforms=$i
i='' i_fa_envira=$i
i='' i_fa_universal_access=$i
i='' i_fa_wheelchair_alt=$i
i='' i_fa_question_circle_o=$i
i='' i_fa_blind=$i
i='' i_fa_audio_description=$i
i='' i_fa_volume_control_phone=$i
i='' i_fa_braille=$i
i='' i_fa_assistive_listening_systems=$i
i='' i_fa_american_sign_language_interpreting=$i
i_fa_asl_interpreting=$i_fa_american_sign_language_interpreting
i='' i_fa_deaf=$i
i_fa_deafness=$i_fa_deaf
i_fa_hard_of_hearing=$i_fa_deaf
i='' i_fa_glide=$i
i='' i_fa_glide_g=$i
i='' i_fa_sign_language=$i
i_fa_signing=$i_fa_sign_language
i='' i_fa_low_vision=$i
i='' i_fa_viadeo=$i
i='' i_fa_viadeo_square=$i
i='' i_fa_snapchat=$i
i='' i_fa_snapchat_ghost=$i
i='' i_fa_snapchat_square=$i
i='' i_fa_pied_piper=$i
i='' i_fa_first_order=$i
i='' i_fa_yoast=$i
i='' i_fa_themeisle=$i
i='' i_fa_google_plus_official=$i
i_fa_google_plus_circle=$i_fa_google_plus_official
i='' i_fa_font_awesome=$i
i_fa_fa=$i_fa_font_awesome
i='' i_fa_handshake_o=$i
i='' i_fa_envelope_open=$i
i='' i_fa_envelope_open_o=$i
i='' i_fa_linode=$i
i='' i_fa_address_book=$i
i='' i_fa_address_book_o=$i
i='' i_fa_address_card=$i
i_fa_vcard=$i_fa_address_card
i='' i_fa_address_card_o=$i
i_fa_vcard_o=$i_fa_address_card_o
i='' i_fa_user_circle=$i
i='' i_fa_user_circle_o=$i
i='' i_fa_user_o=$i
i='' i_fa_id_badge=$i
i='' i_fa_id_card=$i
i_fa_drivers_license=$i_fa_id_card
i='' i_fa_id_card_o=$i
i_fa_drivers_license_o=$i_fa_id_card_o
i='' i_fa_quora=$i
i='' i_fa_free_code_camp=$i
i='' i_fa_telegram=$i
i='' i_fa_thermometer_full=$i
i_fa_thermometer=$i_fa_thermometer_full
i_fa_thermometer_4=$i_fa_thermometer_full
i='' i_fa_thermometer_three_quarters=$i
i_fa_thermometer_3=$i_fa_thermometer_three_quarters
i='' i_fa_thermometer_half=$i
i_fa_thermometer_2=$i_fa_thermometer_half
i='' i_fa_thermometer_quarter=$i
i_fa_thermometer_1=$i_fa_thermometer_quarter
i='' i_fa_thermometer_empty=$i
i_fa_thermometer_0=$i_fa_thermometer_empty
i='' i_fa_shower=$i
i='' i_fa_bath=$i
i_fa_bathtub=$i_fa_bath
i_fa_s15=$i_fa_bath
i='' i_fa_podcast=$i
i='' i_fa_window_maximize=$i
i='' i_fa_window_minimize=$i
i='' i_fa_window_restore=$i
i='' i_fa_window_close=$i
i_fa_times_rectangle=$i_fa_window_close
i='' i_fa_window_close_o=$i
i_fa_times_rectangle_o=$i_fa_window_close_o
i='' i_fa_bandcamp=$i
i='' i_fa_grav=$i
i='' i_fa_etsy=$i
i='' i_fa_imdb=$i
i='' i_fa_ravelry=$i
i='' i_fa_eercast=$i
i='' i_fa_microchip=$i
i='' i_fa_snowflake_o=$i
i='' i_fa_superpowers=$i
i='' i_fa_wpexplorer=$i
i='' i_fa_meetup=$i
unset i
|
goude/runcom
|
utils/nerd-fonts-scripts/i_fa.sh
|
Shell
|
mit
| 22,018 |
#!/bin/bash
# Encrypts backups and sends them to remote
#function usage {
# printf "%s\n" "To use:"
# printf "%s %s\n" "Setup:" "$0 setup /path/to/private.key"
# printf "%s %s\n" "Encrypt a file:" "$0 encrypt key.pub.pem filename"
# printf "%s %s\n" "Decrypt a file:" "$0 encrypt key.pem filename"
#}
function get_full_path {
    # Resolve the absolute path of a file, following any chain of symlinks.
    # Adapted from https://gist.github.com/TheMengzor/968e5ea87e99d9c41782
    #
    # $1 - file name (relative or absolute)
    # Prints the absolute path on stdout; returns 1 if no argument is given.
    if [ -z "$1" ]; then  # quoted so an argument containing spaces still counts as set
        echo "First argument must be a filename" >&2  # stderr: callers capture stdout
        return 1
    fi
    local file_path=$1
    # Walk the symlink chain until file_path names a real file.
    while [ -h "$file_path" ]; do
        local real_dir
        real_dir="$(cd -P "$(dirname "$file_path")" && pwd)"
        file_path="$(readlink "$file_path")"
        # readlink may return a relative target; anchor it to the link's directory
        [[ $file_path != /* ]] && file_path="$real_dir/$file_path"
    done
    echo "$(cd "$(dirname "$file_path")" && pwd)/$(basename "$file_path")"
}
# Locate this script (resolving symlinks) so the bundled default config can
# be found in the same directory.
script_path=`get_full_path ${BASH_SOURCE[0]}`
script_dir=`dirname $script_path`
default_config=$script_dir/enc-backup.conf
#source $script_dir/lib/init_from_config.sh

# Per-user configuration file; expected to define backup_root, key_file,
# enc_src and enc_dest used by the functions below.
config=$HOME/.enc-backup.conf

if [ -e $config ]; then
    # Set settings from config file
    # Taken from:
    # http://mywiki.wooledge.org/glob
    # http://stackoverflow.com/a/20815951
    # TODO: does this work in shells other than Bash?
    # Remember whether extglob was already enabled so it can be restored.
    shopt -q extglob; extglob_set=$?
    ((extglob_set)) && shopt -s extglob
    # Strip CR characters in case the config was edited on Windows.
    tr -d '\r' < $config > $config.unix
    # Each non-comment line is "name = value"; a shell variable named after
    # the key is created for every entry (keys are trusted input here).
    while IFS='= ' read lhs rhs
    do
        if [[ ! $lhs =~ ^\ *# && -n $lhs ]]; then
            rhs="${rhs%%\#*}" # Del in line right comments
            rhs="${rhs%%*( )}" # Del trailing spaces (extglob pattern)
            rhs="${rhs%\"*}" # Del opening string quotes
            rhs="${rhs#\"*}" # Del closing string quotes
            declare $lhs="$rhs"
        fi
    done < $config.unix
    # Clean up after ourselves
    ((extglob_set)) && shopt -u extglob
    rm $config.unix
else
    echo "Error: config file has not been created"
fi
function sanitize_file_arg {
    # Validate that $1 names an existing file and print its absolute path.
    #
    # NOTE(review): callers invoke this inside $(...), so `exit 1` only
    # terminates the command substitution, not the main script; with errors
    # now on stderr the caller receives an empty string on failure.
    if [ -z "$1" ]; then
        echo "Error: argument must be file name" >&2
        exit 1
    fi
    local file
    file=$(get_full_path "$1")
    if [ ! -e "$file" ]; then
        echo "Error: file does not exist" >&2
        exit 1
    fi
    echo "$file"
}
function get_backup_path {
    # Get the project-name/folder path of a file.
    # This path begins from $backup_root (set by the config file).
    # Prints the path relative to $backup_root on stdout; on error prints a
    # message to stderr and returns 1 so callers never receive the error
    # text as if it were a path.
    local full_path
    full_path=$(sanitize_file_arg "$1")
    if [ "${full_path/$backup_root}" == "$full_path" ]; then
        # No substitution happened => $backup_root is not a prefix.
        echo "Error: file is not located in backup folder" >&2
        return 1
    else
        local backup_path=${full_path/$backup_root}
        # Drop the leading '/' left over from stripping the root.
        echo "${backup_path:1}"
    fi
}
function move_to_sync_folder {
    # Moves encrypted file to local sync folder ($enc_src, from the config).
    # From there, it can be rsynced with remote.
    #
    # $1 - path of the plaintext file whose .enc counterpart should move
    local full_path
    full_path=$(sanitize_file_arg "$1")
    if [ ! -e "$full_path.enc" ]; then
        echo "Error: file $1.enc does not exist" >&2
        echo "Please encrypt file before moving it" >&2
        exit 1
    fi
    mv -- "$full_path.enc" "$enc_src/$(get_backup_path "$full_path").enc"
}
# Encrypt/decrypt adapted from http://www.czeskis.com/random/openssl-encrypt-file.html
function encrypt {
    # Symmetrically encrypt $1 to $1.enc with AES-256-CBC, reading the
    # passphrase from the file named by the global $key_file.
    # Returns non-zero if openssl fails.
    local file_name
    file_name=$(sanitize_file_arg "$1")
    if openssl enc -aes-256-cbc -salt -in "$file_name" -out "$file_name.enc" -pass "file:$key_file"; then
        printf "%s\n" "Created ${file_name}.enc"
    else
        echo "Error: could not create ${file_name}.enc" >&2
        return 1
    fi
}
function decrypt {
    # Decrypt $1 (expected to end in ".enc") back to its original name,
    # reading the passphrase from the file named by the global $key_file.
    # Returns non-zero if openssl fails.
    local enc_file decrypted_file
    enc_file=$(sanitize_file_arg "$1")
    decrypted_file=${enc_file:0:-4}  # strip the trailing ".enc" (4 chars)
    if openssl enc -d -aes-256-cbc -in "$enc_file" -out "$decrypted_file" -pass "file:$key_file"; then
        echo "File has been decrypted. See $decrypted_file"
    else
        echo "Error: could not decrypt file" >&2
        return 1
    fi
}
function sync_encrypted {
    # Syncs local encrypted files ($enc_src) to the remote folder
    # ($enc_dest); both come from the config file. Quoted so paths with
    # spaces survive word-splitting.
    rsync -a \
        --verbose \
        --progress \
        "$enc_src/" \
        "$enc_dest"
}
function get_from_remote {
    # Restores encrypted files from the remote folder ($enc_dest) into the
    # local sync folder ($enc_src) -- the inverse of sync_encrypted.
    rsync -a \
        --verbose \
        --progress \
        "$enc_dest/" \
        "$enc_src"
}
#function create_conf_file {
# # Currently, this assumes that the base configuration
# # file is in the same directory as the script
#
# if [ -f "$config_file" ]; then
# echo "$config_file already exists; I will not overwrite it."
# exit 1
# fi
#
# # Get full path of script
# script_path="${BASH_SOURCE[0]}"
#
# # If running script from a symlink, resolve it
# while [ -h "$script_path" ]; do
# real_dir="$(cd -P "$(dirname "$script_path")" && pwd)"
# script_path="$(readlink "$script_path")"
# [[ $script_path != /* ]] && script_path="$real_dir/$script_path"
# done
#
# if [ -z $real_dir ]; then
# real_dir="$(cd -P "$(dirname "$script_path")" && pwd)"
# fi
#
# if [ -f $real_dir/www-db-backup.conf ]; then
# cp $real_dir/www-db-backup.conf $config_file
# else
# echo "Error: $real_dir/www-db-backup.conf does not exist."
# exit 1
# fi
#}
#
#function make_pem_files {
# if [ -z $1 ]; then
# echo "Error: no private key specified."
# return 1
# fi
#
# local private_key=`get_real_path $1`
#
# if [ ! -f $private_key ]; then
# echo "Error: $private_key does not exist"
# return 1
# fi
#
# if [ -f "${private_key}.pem" ]; then
# echo "${private_key}.pem already exists; I will not overwrite it."
# else
# openssl rsa -in $private_key -outform pem > ${private_key}.pem
# fi
#
# if [ -f "${private_key}.pub.pem" ]; then
# echo "${private_key}.pub.pem already exists; I will not overwrite it."
# else
# openssl rsa -in $private_key -pubout -outform pem > ${private_key}.pub.pem
# fi
#
# if [ -f "${private_key}-key.bin" ]; then
# echo "${private_key}-key.bin already exists; I will not overwrite it."
# else
# openssl rand -base64 32 > ${private_key}-key.bin
# fi
#
# if [ -f ${private_key}.pem ] \
# && [ -f ${private_key}.pub.pem ] \
# && [ -f ${private_key}-key.bin ] \
# ; then
# printf "%s\n" "Files created:"
# printf "%s\n" "${private_key}.pem"
# printf "%s\n" "${private_key}.pub.pem"
# printf "%s\n" "${private_key}-key.bin"
# printf "%s\n\n" "$config_file"
# printf "%s\n" "Change $config_file to match your configuration, then run:"
# printf "\t%s\n" "$0 configure"
# printf "%s\n" "to finish setup."
# fi
#}
#
#function setup {
# make_pem_files $1
# create_conf_file
#}
#
#
#function create_folders {
# local destination=$1
# local project_string=$2
# OLD_IFS=$IFS
# IFS=','
# local project_names=$project_string
#
# # First, create the main folder
# if [ ! -d $destination ]; then
# mkdir $destination
# fi
#
# for current_project in $project_names; do
# echo "From create_folders: $current_project"
# done
#
# # Then set up folders for each project
# for current_project in $project_names; do
# if [ ! -d $destination/$current_project ]; then
# echo "Creating folders for $current_project"
# echo "Creating $destination/$current_project/production"
# mkdir -p $destination/$current_project/production
# echo "Creating $destination/$current_project/development"
# mkdir $destination/$current_project/development
# else
# echo "Folders for $current_project already exist"
# fi
# done
# IFS=$OLD_IFS
#}
#
#function configure_remote {
# local destination=$1
# local project_names=$2
# local divider_index=`expr index "$destination" ':'`
# local remote_machine=${destination:0: ${divider_index}-1}
# destination=${destination:${divider_index}}
#
# ssh $remote_machine "$(typeset -f); create_folders $destination \"$project_names\""
#}
#
#function run_config_file {
# if [ ! -z $1 ]; then
# config_file=`get_real_path $1`
# fi
#
# local line_counter=1
# local config_section
# local project_names
# while read line; do
# # Skip comments and empty lines
# if [[ "$line" =~ ^#.*$ ]] \
# || [ -z "$line" ]\
# ; then
# continue
# fi
#
# # What section am I on?
# if [[ "$line" =~ ^\[.*\]$ ]]; then
# config_section=${line:1: -1}
# continue
# fi
#
# if [ "$config_section" == "Destination" ]; then
# local destination=$line
# elif [ "$config_section" == "Project Names" ]; then
# if [ -z $project_names ]; then
# project_names=$line
# else
# project_names="$project_names,$line"
# fi
# fi
#
# ((line_counter++))
# done < $config_file
#
# if [[ $destination == *"@"* ]]; then
# configure_remote $destination $project_names
# else
# create_folders $destination $project_names
# fi
#}
#
#function encrypt_file {
#
# local public_pem=$1
# local key_file=$2
# local file_name=$3
#
# if [ ! -f $public_pem ]; then
# echo "$public_pem does not exist."
# return 1
# fi
#
# if [ ! -f $key_file ]; then
# echo "$key_file does not exist."
# return 1
# fi
#
# if [ ! -f $file_name ]; then
# echo "$file_name does not exist."
# return 1
# fi
#
# if openssl rsautl -encrypt -inkey $public_pem -pubin -in $key_file -out ${key_file}.enc \
# && openssl enc -aes-256-cbc -salt -in $file_name -out ${file_name}.enc -pass file:$key_file; then
# printf "%s\n" "Created ${key_file}.enc ${file_name}.enc."
# else
# echo "Error: could not create ${key_file}.enc ${file_name}.enc."
# fi
#}
#
#function get_config_list() {
# if [ -z "$1" ]; then
# echo "No config file specified"
# exit 1
# fi
#
# if [ -z "$2" ]; then
# echo "No section specified"
# exit 1
# fi
#
# local config=$1
# local section=$2
# local found_them=false
# local line_counter=1
# local list_holder=''
# while read line; do
# # Don't need comments and empty lines
# if [ "${line:0:1}" == '#' ] || [ -z "$line" ]; then
# continue
# fi
#
# # Get the lines from the right category
# if [ $found_them == true ] && [ "${line:0:1}" != '[' ]; then
# list_holder="$list_holder $line"
# elif [ "$line" == "$section" ]; then
# found_them=true
# else
# continue
# fi
# ((line_counter++))
# done < $config
# echo $list_holder
#}
#
## In development
#function sync_backups {
# if [ -z $1 ]; then
# echo "Error: first argument must be file_name.enc"
# return 1
# fi
#
# dest=`get_config_list $HOME/.www-db-backup.conf "[Destination]"`
# backup_root=`get_config_list $HOME/.www-db-backup.conf "[Root Folder]"`
#
# # TODO: send the encrypted file to Gerhard
# #rsync -a \
# # --progress \
#
#}
#
#function backup_and_store {
# if [ -z $1 ]; then
# echo "Error: first argument must be file_name.pub.pem"
# return 1
# fi
#
# if [ -z $2 ]; then
# echo "Error: no file specified."
# return 1
# fi
#
# local public_pem=`get_real_path $1`
# local key_file=`get_real_path ${public_pem:0: -8}`
# local file_name=`get_real_path $2`
#
# # TODO: get stuff like this into an object that is initialized when
# # the script is run
# backup_root=`get_config_list $HOME/.www-db-backup.conf "[Root Folder]"`
#
# # Get project name
# OLD_IFS=$IFS
# IFS='/'
# split_path=$file_name
# for dir in $split_path; do
# echo $dir
# done
# IFS=$OLD_IFS
#
# #if encrypt_file $public_pem $key_file $file_name; then
# # mv ${file_name}.enc $backup_root
# # sync_backups
# #fi
#}
#
#function decrypt_file {
# if [ -z $1 ]; then
# echo "Error: first argument must be file_name.pem"
# return 1
# fi
#
# if [ -z $2 ]; then
# echo "Error: no file specified."
# return 1
# fi
#
# local pem=`get_real_path $1`
# local file_name=`get_real_path $2`
#
# if [ ! -f $pem]; then
# echo "$pem does not exist"
# return 1
# fi
#
# if [ ! -f $file_name ]; then
# echo "$file_name does not exist"
# return 1
# fi
#
# openssl rsautl -decrypt -inkey $pem -in $file_name -out ${file_name:0: -4}
#}
#
#case "$1" in
# encrypt)
# backup_and_store $2 $3
# ;;
# decrypt)
# decrypt_file $2 $3
# ;;
# test)
# # run_config_file $2
# # sync_backups
# backup_and_store $2 $3
# ;;
# *)
# usage
#esac
# Explicitly report success; command dispatch above is currently commented out.
exit 0
|
denise-lhirondelle/enc-backup.sh
|
enc-backup.sh
|
Shell
|
mit
| 12,752 |
#!/bin/sh
# Launch run.sh in the background, immune to hangups, so it keeps running
# after the terminal closes (output goes to ./nohup.out by default).
nohup ./run.sh &
|
fitGroup/Guide-Rock-server
|
run-in-bg.sh
|
Shell
|
mit
| 27 |
#!/bin/bash
#
# This script will install Travis test dependencies.
#
# Install the dependencies needed to run the unit-test stage on Travis.
# Globals read: CACHED_FOLDER, TRAVIS_OS_NAME, TRAVIS_BUILD_DIR.
function travis_unittest()
{
    # Install general build dependencies
    cd $CACHED_FOLDER
    if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
        # Linux: delegate to the repo's installer script; flags are
        # interpreted by that script -- see its usage for their meaning.
        source $TRAVIS_BUILD_DIR/tools/linux-install-deps.sh -y -u -x -s
    else
        # Does not install non-pypi libraries:
        # corresponding tests will be skipped
        type python
        python -m pip install --upgrade pip --user
        python --version
        pip --version
        python -m pip install --upgrade setuptools --user
        python -m pip install --upgrade wheel --user
        python -m pip install pybind11 --user
        python -m pip install -r $TRAVIS_BUILD_DIR/requirements.txt --user
        python -m pip install -r $TRAVIS_BUILD_DIR/requirements-dev.txt --user
    fi
}
# Install the style checker used by the lint stage.
# Globals read: CACHED_FOLDER.
function travis_styletest()
{
    # Quoted and checked: a bad CACHED_FOLDER should not silently install
    # into whatever directory we happened to be in.
    cd "$CACHED_FOLDER" || return 1
    python -m pip install flake8
}
# Dispatch on $TRAVISRUN ("unit" or "style") to install the matching
# dependencies, then print Python environment details for the build log.
function main()
{
    if [[ "${TRAVISRUN}" == "unit" ]]; then
        travis_unittest
    elif [[ "${TRAVISRUN}" == "style" ]]; then
        travis_styletest
    else
        echo "No dependencies to be installed"
    fi

    # Print Python info
    echo "Python information:"
    python "$TRAVIS_BUILD_DIR/ci/info_platform.py"
    python -m pip list
}

main "$@"
|
woutdenolf/spectrocrunch
|
ci/travis-install-deps.sh
|
Shell
|
mit
| 1,268 |
#!/bin/sh
#
# deal.II
# http://www.dealii.org/

# Build-time dependencies supplied by the cluster's module system.
module load openblas
module load lapack
module load suitesparse
module load trilinos-serial

# helper.sh presumably defines set_stage_dl/leave_stage and $PREFIX used
# below -- TODO confirm; CLI args are forwarded to it.
source ./helper.sh $*

# Download and unpack the deal.II 8.2.1 release into the staging area.
set_stage_dl https://github.com/dealii/dealii/releases/download/v8.2.1/dealii-8.2.1.tar.gz

# Build shared libraries
mkdir build_shared
cd build_shared
cmake \
    -D CMAKE_INSTALL_PREFIX:PATH=$PREFIX/deal.II-8.2.1-serial \
    -D DEAL_II_WITH_TRILINOS:BOOL=ON \
    -D TRILINOS_DIR=$PREFIX/trilinos/serial-opt-shared \
    ..
make
make install
leave_stage
|
cornell-cs5220-f15/totient-pkg
|
configs/dealii.sh
|
Shell
|
mit
| 514 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2010:0839
#
# Security announcement date: 2010-11-09 18:11:07 UTC
# Script generation date: 2017-01-01 21:12:52 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - kernel-doc.noarch:2.6.18-194.26.1.el5
# - kernel.x86_64:2.6.18-194.26.1.el5
# - kernel-debug.x86_64:2.6.18-194.26.1.el5
# - kernel-debug-debuginfo.x86_64:2.6.18-194.26.1.el5
# - kernel-debug-devel.x86_64:2.6.18-194.26.1.el5
# - kernel-debuginfo.x86_64:2.6.18-194.26.1.el5
# - kernel-debuginfo-common.x86_64:2.6.18-194.26.1.el5
# - kernel-devel.x86_64:2.6.18-194.26.1.el5
# - kernel-headers.x86_64:2.6.18-194.26.1.el5
# - kernel-xen.x86_64:2.6.18-194.26.1.el5
# - kernel-xen-debuginfo.x86_64:2.6.18-194.26.1.el5
# - kernel-xen-devel.x86_64:2.6.18-194.26.1.el5
#
# Last versions recommanded by security team:
# - kernel-doc.noarch:2.6.18-194.26.1.el5
# - kernel.x86_64:2.6.18-417.el5
# - kernel-debug.x86_64:2.6.18-417.el5
# - kernel-debug-debuginfo.x86_64:2.6.18-417.el5
# - kernel-debug-devel.x86_64:2.6.18-417.el5
# - kernel-debuginfo.x86_64:2.6.18-417.el5
# - kernel-debuginfo-common.x86_64:2.6.18-417.el5
# - kernel-devel.x86_64:2.6.18-417.el5
# - kernel-headers.x86_64:2.6.18-417.el5
# - kernel-xen.x86_64:2.6.18-417.el5
# - kernel-xen-debuginfo.x86_64:2.6.18-417.el5
# - kernel-xen-devel.x86_64:2.6.18-417.el5
#
# CVE List:
# - CVE-2010-3066
# - CVE-2010-3067
# - CVE-2010-3078
# - CVE-2010-3086
# - CVE-2010-3477
# - CVE-2010-2963
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade every affected kernel package to the fixed 2.6.18 build.
# Same yum invocations as before, expressed once per package via a loop.
for pkg in \
    kernel-doc.noarch \
    kernel.x86_64 \
    kernel-debug.x86_64 \
    kernel-debug-debuginfo.x86_64 \
    kernel-debug-devel.x86_64 \
    kernel-debuginfo.x86_64 \
    kernel-debuginfo-common.x86_64 \
    kernel-devel.x86_64 \
    kernel-headers.x86_64 \
    kernel-xen.x86_64 \
    kernel-xen-debuginfo.x86_64 \
    kernel-xen-devel.x86_64
do
    sudo yum install "${pkg}-2.6.18" -y
done
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2010/RHSA-2010:0839.sh
|
Shell
|
mit
| 2,320 |
# Build the kube-proxy image and push it to the local registry.
# The push is chained with && so a failed build is never pushed.
IMAGE=dckreg:5000/kube-proxy-amd64:v1.5.2
sudo docker build -t "$IMAGE" . \
    && sudo docker push "$IMAGE"
|
reza-rahim/microservice
|
ansible/provision/docker/kube-proxy/build.sh
|
Shell
|
mit
| 113 |
# Fetch coreos-vagrant via `go get`, drop in our config files, and boot the
# VMs. Paths are quoted and the cd is checked so a bad GOPATH fails loudly.
p=$(pwd)
echo "clone coreos-vagrant via go get..."
go get github.com/coreos/coreos-vagrant
echo "cp setting files..."
cp -f ./config.rb "$GOPATH/src/github.com/coreos/coreos-vagrant/"
cp -f ./user-data.yml "$GOPATH/src/github.com/coreos/coreos-vagrant/user-data"
echo "up vms..."
cd "$GOPATH/src/github.com/coreos/coreos-vagrant" || exit 1
vagrant up
echo "for ssh-agent, run \"ssh-add ~/.vagrant.d/insecure_private_key\""
echo "for fleetctl command, run \"source export.sh\""
cd "$p"
echo "end."
|
ntk1000/microservice-sandbox
|
infra/coreos/coreos-vagrant-setup.sh
|
Shell
|
mit
| 482 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2968-1
#
# Security announcement date: 2014-06-27 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:58 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - gnupg2:2.0.19-2+deb7u2
# - gnupg-agent:2.0.19-2+deb7u2
# - scdaemon:2.0.19-2+deb7u2
# - gpgsm:2.0.19-2+deb7u2
#
# Last versions recommanded by security team:
# - gnupg2:2.0.19-2+deb7u2
# - gnupg-agent:2.0.19-2+deb7u2
# - scdaemon:2.0.19-2+deb7u2
# - gpgsm:2.0.19-2+deb7u2
#
# CVE List:
# - CVE-2014-4617
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade each affected GnuPG 2 package to the patched Wheezy build.
# Identical apt-get invocations as before, expressed via a loop.
for pkg in gnupg2 gnupg-agent scdaemon gpgsm; do
    sudo apt-get install --only-upgrade "${pkg}=2.0.19-2+deb7u2" -y
done
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/i386/2014/DSA-2968-1.sh
|
Shell
|
mit
| 990 |
#!/bin/bash
# Run the jpt.app01 demo server with GC logging, a bounded heap, a JUL
# logging config, and a JMX endpoint for monitoring tools.

# Load shared environment settings from the parent directory (see ../setenv.sh).
pushd ..
. setenv.sh
popd

logs_dir="./logs"
mkdir -p "$logs_dir"

# Timestamped GC log so successive runs don't overwrite each other.
ts=$(date +%Y%m%d-%H%M%S)
gclog_options="-Xloggc:$logs_dir/gc-$ts.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps"

# 1 GiB initial / 2 GiB max heap.
heap_options="-Xms1g -Xmx2g"
#heap_options="-Xms25m -Xmx50m" # try this if you dare

logging_options="-Djava.util.logging.config.file=./logging.properties"

# NOTE(review): JMX on port 6789 with SSL and authentication disabled --
# fine for local exercises, do not use as-is on shared hosts.
jmx_options="-Dcom.sun.management.jmxremote.port=6789"
jmx_options="$jmx_options -Dcom.sun.management.jmxremote.ssl=false"
jmx_options="$jmx_options -Dcom.sun.management.jmxremote.authenticate=false"

# Option variables are intentionally unquoted: each holds several JVM flags
# that must word-split into separate arguments.
java -classpath target/classes $gclog_options $heap_options $logging_options $jmx_options jpt.app01.Main -p7666 -q0
|
arnauldvm/jpt-exercises
|
apps/app01/runserver.sh
|
Shell
|
mit
| 664 |
#!/bin/bash
##
## File:         i386_qemu_floppy.sh
##
## Author:       Schuyler Martin <[email protected]>
##
## Description:  Wrapper script that starts a QEMU VM that emulates an i386
##               computer and loads SeeGOL via a floppy image
##
## Bug fix: the original shebang read "#/bin/bash" (missing '!'), so the
## kernel could not identify the interpreter when the script was exec'd.

# -i386 is the closest we can get to a pure 8080 or 8088 emulation
# -vga std Default to VGA standard
# -rtc sets the real time clock
# -fda loads a floppy disk image
# -boot sets the initial boot device
qemu-system-i386 -vga std -rtc base=localtime,clock=host \
    -fda img/floppy.img -boot a
|
schuylermartin45/seegol
|
tools/i386_qemu_floppy.sh
|
Shell
|
mit
| 534 |
#!/bin/bash
#
# A script to run Drupal VM functional tests.
# Spins up a Docker container for the selected distro, provisions Drupal VM
# inside it with Ansible, then smoke-tests the installed services over HTTP.
# Set defaults if they're not set upstream.
CONFIG="${CONFIG:-tests/config.yml}"
MAKEFILE="${MAKEFILE:-example.drupal.make.yml}"
COMPOSERFILE="${COMPOSERFILE:-example.drupal.composer.json}"
HOSTNAME="${HOSTNAME:-drupalvm.test}"
MACHINE_NAME="${MACHINE_NAME:-drupalvm}"
IP="${IP:-192.168.88.88}"
DRUPALVM_DIR="${DRUPALVM_DIR:-/var/www/drupalvm}"
DRUSH_BIN="${DRUSH_BIN:-drush}"
TEST_INSTALLED_EXTRAS="${TEST_INSTALLED_EXTRAS:-true}"
CONTAINER_ID="${CONTAINER_ID:-dvm-test}"
# NOTE(review): $type is assigned here but never referenced below — presumably
# consumed by an included config or left over; confirm before removing.
type="${type:-tests/defaults}"
distro="${distro:-ubuntu1604}"
## Set up vars for Docker setup.
# Each distro needs the right init binary (PID 1 in the container) and the
# docker-run options systemd requires (privileged + cgroup mount).
# CentOS 7
if [ $distro = 'centos7' ]; then
init="/usr/lib/systemd/systemd"
opts="--privileged --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
# CentOS 6
elif [ $distro = 'centos6' ]; then
init="/sbin/init"
opts="--privileged"
# Ubuntu 16.04
elif [ $distro = 'ubuntu1604' ]; then
init="/lib/systemd/systemd"
opts="--privileged --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
# Ubuntu 14.04
elif [ $distro = 'ubuntu1404' ]; then
init="/sbin/init"
opts="--privileged"
# Debian 9
elif [ $distro = 'debian9' ]; then
init="/lib/systemd/systemd"
opts="--privileged --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
# Debian 8
elif [ $distro = 'debian8' ]; then
init="/lib/systemd/systemd"
opts="--privileged --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
# Fedora 24
elif [ $distro = 'fedora24' ]; then
init="/usr/lib/systemd/systemd"
opts="--privileged --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro"
fi
# Set OS-specific options.
# 'xargs -r' (GNU: don't run the command on empty input) is unavailable on
# macOS/BSD xargs, hence the branch.
if [[ "$OSTYPE" == "darwin"* ]]; then
volume_opts='rw,cached'
xargs_command='xargs'
else
volume_opts='rw'
xargs_command='xargs -r'
fi
# Exit on any individual command failure.
# (Note: the setup above this line was not covered by -e.)
set -e
# Pretty colors.
red='\033[0;31m'
green='\033[0;32m'
neutral='\033[0m'
# Remove test container if it already exists.
printf "\n"${green}"Removing any existing test containers..."${neutral}"\n"
docker ps -aq --filter name=$CONTAINER_ID | $xargs_command docker rm -f -v
printf ${green}"...done!"${neutral}"\n"
# Run the container.
printf "\n"${green}"Starting Docker container: geerlingguy/docker-$distro-ansible."${neutral}"\n"
docker run --name=$CONTAINER_ID -d \
--add-host "$HOSTNAME drupalvm":127.0.0.1 \
-v $PWD:/var/www/drupalvm/:$volume_opts \
$opts \
geerlingguy/docker-$distro-ansible:latest \
$init
# Set up directories.
docker exec $CONTAINER_ID mkdir -p /var/www/drupalvm/drupal
[[ ! -z "$config_dir" ]] && docker exec $CONTAINER_ID mkdir -p $config_dir || true
# Copy configuration into place.
docker exec $CONTAINER_ID cp $DRUPALVM_DIR/$CONFIG ${config_dir:-$DRUPALVM_DIR}/config.yml
docker exec $CONTAINER_ID cp $DRUPALVM_DIR/$MAKEFILE ${config_dir:-$DRUPALVM_DIR}/drupal.make.yml
docker exec $CONTAINER_ID cp $DRUPALVM_DIR/$COMPOSERFILE ${config_dir:-$DRUPALVM_DIR}/drupal.composer.json
[[ ! -z "$DRUPALVM_ENV" ]] && docker exec $CONTAINER_ID bash -c "cp $DRUPALVM_DIR/tests/$DRUPALVM_ENV.config.yml ${config_dir:-$DRUPALVM_DIR}/$DRUPALVM_ENV.config.yml" || true
# Override configuration variables with local config.
[[ ! -z "$local_config" ]] && docker exec $CONTAINER_ID bash -c "cp $DRUPALVM_DIR/$local_config ${config_dir:-$DRUPALVM_DIR}/local.config.yml" || true
# Check playbook syntax.
printf "\n"${green}"Checking playbook syntax..."${neutral}"\n"
docker exec --tty $CONTAINER_ID env TERM=xterm ansible-playbook $DRUPALVM_DIR/provisioning/playbook.yml --syntax-check
# Run the setup playbook.
printf "\n"${green}"Running the setup playbook..."${neutral}"\n"
docker exec --tty $CONTAINER_ID env TERM=xterm ansible-playbook /var/www/drupalvm/tests/test-setup.yml
# Run the Drupal VM playbook.
printf "\n"${green}"Running the Drupal VM playbook..."${neutral}"\n"
if [ ! -z "${config_dir}" ]; then
# Run with config_dir specified.
docker exec $CONTAINER_ID env TERM=xterm ANSIBLE_FORCE_COLOR=true DRUPALVM_ENV=$DRUPALVM_ENV \
ansible-playbook $DRUPALVM_DIR/provisioning/playbook.yml \
--extra-vars="config_dir=$config_dir";
else
# Run without config_dir specified.
docker exec $CONTAINER_ID env TERM=xterm ANSIBLE_FORCE_COLOR=true DRUPALVM_ENV=$DRUPALVM_ENV \
ansible-playbook $DRUPALVM_DIR/provisioning/playbook.yml;
fi
# Functional checks: each pipes curl into grep; on failure the
# '|| (echo fail && exit 1)' subshell returns 1, which aborts the whole
# script through 'set -e' above.
# Drupal.
printf "\n"${green}"Running functional tests..."${neutral}"\n"
docker exec $CONTAINER_ID curl -s --header Host:$HOSTNAME localhost \
| grep -q '<title>Welcome to Drupal' \
&& (echo 'Drupal install pass' && exit 0) \
|| (echo 'Drupal install fail' && exit 1)
# Adminer.
if [ $TEST_INSTALLED_EXTRAS = true ]; then
docker exec $CONTAINER_ID curl -s --header Host:adminer.$HOSTNAME localhost \
| grep -q '<title>Login - Adminer' \
&& (echo 'Adminer install pass' && exit 0) \
|| (echo 'Adminer install fail' && exit 1)
fi
# Pimp My Log.
if [ $TEST_INSTALLED_EXTRAS = true ]; then
docker exec $CONTAINER_ID curl -s --header Host:pimpmylog.$HOSTNAME localhost \
| grep -q '<title>Pimp my Log' \
&& (echo 'Pimp my Log install pass' && exit 0) \
|| (echo 'Pimp my Log install fail' && exit 1)
fi
# MailHog.
if [ $TEST_INSTALLED_EXTRAS = true ]; then
docker exec $CONTAINER_ID curl -s localhost:8025 \
| grep -q '<title>MailHog' \
&& (echo 'MailHog install pass' && exit 0) \
|| (echo 'MailHog install fail' && exit 1)
fi
# Varnish.
if [ $TEST_INSTALLED_EXTRAS = true ]; then
docker exec $CONTAINER_ID curl -sI --header Host:$HOSTNAME localhost:81 \
| grep -q 'Via: .* varnish' \
&& (echo 'Varnish install pass' && exit 0) \
|| (echo 'Varnish install fail' && exit 1)
fi
# Dashboard.
docker exec $CONTAINER_ID curl -s --header Host:$IP localhost \
| grep -q "<li>$IP $HOSTNAME</li>" \
&& (echo 'Dashboard install pass' && exit 0) \
|| (echo 'Dashboard install fail' && exit 1)
# Drush.
docker exec $CONTAINER_ID $DRUSH_BIN @$MACHINE_NAME.$HOSTNAME status \
| grep -q 'Drupal bootstrap.*Successful' \
&& (echo 'Drush install pass' && exit 0) \
|| (echo 'Drush install fail' && exit 1)
# Remove test container.
printf "\n"${green}"Cleaning up..."${neutral}"\n"
docker rm -f $CONTAINER_ID
printf ${green}"...done!"${neutral}"\n\n"
|
thom8/drupal-vm
|
tests/run-tests.sh
|
Shell
|
mit
| 6,142 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3263-1 (CVE-2015-3306)
#
# Security announcement date: 2015-05-19 00:00:00 UTC
# Script generation date:     2017-01-01 21:07:23 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Upgrades the proftpd packages to the fixed version 1.3.4a-5+deb7u3,
# which is also the latest version recommended by the security team.
#
# More details:
#   - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

FIXED_VERSION="1.3.4a-5+deb7u3"

# Same nine upgrades as before, issued in the same order.
for package in proftpd-dfsg proftpd-basic proftpd-dev proftpd-doc \
               proftpd-mod-mysql proftpd-mod-pgsql proftpd-mod-ldap \
               proftpd-mod-odbc proftpd-mod-sqlite
do
    sudo apt-get install --only-upgrade "${package}=${FIXED_VERSION}" -y
done
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/i386/2015/DSA-3263-1.sh
|
Shell
|
mit
| 1,803 |
#!/bin/bash
# A script to build shared and static versions of the Lapack libraries:
# 1. Assumes the NETLIB source archive lapack-$LPVER.tar.gz is present
# 2. Under directory lapack-$LPVER, make.inc.example, SRC/Makefile and
# BLAS/SRC/Makefile are modified with lapack-$LPVER.patch
# NOTE(review): assumes $LPVER is already set in the environment (e.g.
# "3.9.0") — confirm; nothing in this script defines it.
# Neutralize any 'cp' alias (e.g. cp -i); note that aliases are normally
# inactive in non-interactive shells anyway.
alias cp=cp
MAKE_OPTS="-j 6"
# One build directory per target micro-architecture.
ALL_BUILDS="generic intel haswell nehalem skylake"
# Patch lapack files make.inc.example, SRC/Makefile and BLAS/SRC/Makefile
# The patch is embedded below as a uuencoded (base64) payload so this
# script is self-contained; it adds BLDOPTS, enables deprecated routines,
# and adds .so targets to the BLAS and LAPACK Makefiles.
cat > lapack.patch.uue << 'EOF'
begin-base64 644 lapack.patch
LS0tIGxhcGFjay0zLjkuMC9tYWtlLmluYy5leGFtcGxlCTIwMTktMTEtMjEg
MTg6NTc6NDMuMDAwMDAwMDAwICsxMTAwCisrKyBsYXBhY2stMy45LjAubmV3
L21ha2UuaW5jLmV4YW1wbGUJMjAyMC0wOC0yMSAxMzowMzo0Mi4xNzU0MDg1
MzUgKzEwMDAKQEAgLTksNyArOSw4IEBACiAjICBDQyBpcyB0aGUgQyBjb21w
aWxlciwgbm9ybWFsbHkgaW52b2tlZCB3aXRoIG9wdGlvbnMgQ0ZMQUdTLgog
IwogQ0MgPSBnY2MKLUNGTEFHUyA9IC1PMworQkxET1BUUyA9IC1mUElDIC1t
NjQgLW10dW5lPWdlbmVyaWMKK0NGTEFHUyA9IC1PMyAkKEJMRE9QVFMpCiAK
ICMgIE1vZGlmeSB0aGUgRkMgYW5kIEZGTEFHUyBkZWZpbml0aW9ucyB0byB0
aGUgZGVzaXJlZCBjb21waWxlcgogIyAgYW5kIGRlc2lyZWQgY29tcGlsZXIg
b3B0aW9ucyBmb3IgeW91ciBtYWNoaW5lLiAgTk9PUFQgcmVmZXJzIHRvCkBA
IC0xOSwxMCArMjAsMTAgQEAKICMgIGFuZCBoYW5kbGUgdGhlc2UgcXVhbnRp
dGllcyBhcHByb3ByaWF0ZWx5LiBBcyBhIGNvbnNlcXVlbmNlLCBvbmUKICMg
IHNob3VsZCBub3QgY29tcGlsZSBMQVBBQ0sgd2l0aCBmbGFncyBzdWNoIGFz
IC1mZnBlLXRyYXA9b3ZlcmZsb3cuCiAjCi1GQyA9IGdmb3J0cmFuCi1GRkxB
R1MgPSAtTzIgLWZyZWN1cnNpdmUKK0ZDID0gZ2ZvcnRyYW4gLWZyZWN1cnNp
dmUgJChCTERPUFRTKQorRkZMQUdTID0gLU8yIAogRkZMQUdTX0RSViA9ICQo
RkZMQUdTKQotRkZMQUdTX05PT1BUID0gLU8wIC1mcmVjdXJzaXZlCitGRkxB
R1NfTk9PUFQgPSAtTzAKIAogIyAgRGVmaW5lIExERkxBR1MgdG8gdGhlIGRl
c2lyZWQgbGlua2VyIG9wdGlvbnMgZm9yIHlvdXIgbWFjaGluZS4KICMKQEAg
LTU3LDcgKzU4LDcgQEAKICMgIFVuY29tbWVudCB0aGUgZm9sbG93aW5nIGxp
bmUgdG8gaW5jbHVkZSBkZXByZWNhdGVkIHJvdXRpbmVzIGluCiAjICB0aGUg
TEFQQUNLIGxpYnJhcnkuCiAjCi0jQlVJTERfREVQUkVDQVRFRCA9IFllcwor
QlVJTERfREVQUkVDQVRFRCA9IFllcwogCiAjICBMQVBBQ0tFIGhhcyB0aGUg
aW50ZXJmYWNlIHRvIHNvbWUgcm91dGluZXMgZnJvbSB0bWdsaWIuCiAjICBJ
ZiBMQVBBQ0tFX1dJVEhfVE1HIGlzIGRlZmluZWQsIGFkZCB0aG9zZSByb3V0
aW5lcyB0byBMQVBBQ0tFLgpAQCAtNzYsNyArNzcsNyBAQAogIyAgbWFjaGlu
ZS1zcGVjaWZpYywgb3B0aW1pemVkIEJMQVMgbGlicmFyeSBzaG91bGQgYmUg
dXNlZCB3aGVuZXZlcgogIyAgcG9zc2libGUuKQogIwotQkxBU0xJQiAgICAg
ID0gJChUT1BTUkNESVIpL2xpYnJlZmJsYXMuYQorQkxBU0xJQiAgICAgID0g
JChUT1BTUkNESVIpL2xpYmJsYXMuYQogQ0JMQVNMSUIgICAgID0gJChUT1BT
UkNESVIpL2xpYmNibGFzLmEKIExBUEFDS0xJQiAgICA9ICQoVE9QU1JDRElS
KS9saWJsYXBhY2suYQogVE1HTElCICAgICAgID0gJChUT1BTUkNESVIpL2xp
YnRtZ2xpYi5hCi0tLSBsYXBhY2stMy45LjAvU1JDL01ha2VmaWxlCTIwMTkt
MTEtMjEgMTg6NTc6NDMuMDAwMDAwMDAwICsxMTAwCisrKyBsYXBhY2stMy45
LjAubmV3L1NSQy9NYWtlZmlsZQkyMDIwLTA4LTIxIDEzOjAxOjE2LjEzMDY2
NDMwNiArMTAwMApAQCAtNTMyLDYgKzUzMiw5IEBACiAJJChBUikgJChBUkZM
QUdTKSAkQCAkXgogCSQoUkFOTElCKSAkQAogCiskKG5vdGRpciAkKExBUEFD
S0xJQjolLmE9JS5zbykpOiAkKEFMTE9CSikgJChBTExYT0JKKSAkKERFUFJF
Q0FURUQpCisJJChGQykgLXNoYXJlZCAtV2wsLXNvbmFtZSwkQCAtbyAkQCAk
XgorCiAuUEhPTlk6IHNpbmdsZSBjb21wbGV4IGRvdWJsZSBjb21wbGV4MTYK
IHNpbmdsZTogJChTTEFTUkMpICQoRFNMQVNSQykgJChTWExBU1JDKSAkKFND
TEFVWCkgJChBTExBVVgpCiAJJChBUikgJChBUkZMQUdTKSAkKExBUEFDS0xJ
QikgJF4KLS0tIGxhcGFjay0zLjkuMC9CTEFTL1NSQy9NYWtlZmlsZQkyMDE5
LTExLTIxIDE4OjU3OjQzLjAwMDAwMDAwMCArMTEwMAorKysgbGFwYWNrLTMu
OS4wLm5ldy9CTEFTL1NSQy9NYWtlZmlsZQkyMDIwLTA4LTIxIDEzOjAyOjAw
LjA1MDI4NjY2MiArMTAwMApAQCAtMTQzLDYgKzE0Myw5IEBACiAJJChBUikg
JChBUkZMQUdTKSAkQCAkXgogCSQoUkFOTElCKSAkQAogCiskKG5vdGRpciAk
KEJMQVNMSUI6JS5hPSUuc28pKTogJChBTExPQkopCisJJChGQykgLXNoYXJl
ZCAtV2wsLXNvbmFtZSwkQCAtbyAkQCAkXgorCiAuUEhPTlk6IHNpbmdsZSBk
b3VibGUgY29tcGxleCBjb21wbGV4MTYKIHNpbmdsZTogJChTQkxBUzEpICQo
QUxMQkxBUykgJChTQkxBUzIpICQoU0JMQVMzKQogCSQoQVIpICQoQVJGTEFH
UykgJChCTEFTTElCKSAkXgo=
====
EOF
# Decode the payload above into lapack.patch.
uudecode lapack.patch.uue
# Unpack a pristine source tree and apply the patch once.
rm -Rf lapack-$LPVER
tar -xf lapack-$LPVER.tar.gz
pushd lapack-$LPVER
patch -p1 < ../lapack.patch
cp make.inc.example make.inc
popd
# Create directories
mkdir -p lapack
pushd lapack
mkdir -p $ALL_BUILDS
# Populate directories
# Each build directory gets its own full copy of the patched source tree.
for dir in $ALL_BUILDS ; do
echo $dir
pushd $dir
cp -Rf ../../lapack-$LPVER .
popd
done
# Set build options in each directory
# generic/intel builds use -m64 -mtune=<name>; the newer targets use
# -march=<name> (which also enables the target's instruction-set extensions).
for dir in generic intel ; do
sed -i -e "s/^BLDOPTS\ *=.*/BLDOPTS\ \ = -fPIC -m64 -mtune=$dir/" $dir/lapack-$LPVER/make.inc
done
for dir in haswell nehalem skylake ; do
sed -i -e "s/^BLDOPTS\ *=.*/BLDOPTS\ \ = -fPIC -march=$dir/" $dir/lapack-$LPVER/make.inc
done
# Build in each directory
# For each target: build static + shared BLAS, then static + shared LAPACK,
# copying the shared objects up to the per-target source root.
for dir in $ALL_BUILDS ; do
echo $dir ;
pushd $dir/lapack-$LPVER/BLAS/SRC ;
make $MAKE_OPTS ;
make $MAKE_OPTS libblas.so ;
cp libblas.so ../.. ;
popd ;
pushd $dir/lapack-$LPVER/SRC ;
make $MAKE_OPTS ;
make $MAKE_OPTS liblapack.so ;
cp liblapack.so .. ;
popd ;
done
# Done
popd
|
robertgj/DesignOfIIRFilters
|
benchmark/build-lapack.sh
|
Shell
|
mit
| 4,638 |
#!/bin/bash
# Remove any log files and dumps left behind by earlier stress-test runs;
# all output and errors are discarded.
rm -f *.log *.dump > /dev/null 2>&1
|
Zubax/zubax_chibios
|
tools/bootloader_stress_test/cleanup.sh
|
Shell
|
mit
| 45 |
# Provision MySQL 5.7 from the official MySQL APT repository (Debian/Ubuntu
# Vagrant box): utf8mb4 defaults, remote root/vagrant access, timezone tables.
# NOTE(review): the root/vagrant passwords are hardcoded to "secret" — this is
# only acceptable for a throwaway development VM.
wget -P /tmp http://dev.mysql.com/get/mysql-apt-config_0.5.3-1_all.deb
# Pre-seed mysql-apt-config so dpkg needs no interactive prompts:
# enable only the 5.7 server and the 1.5 utilities, skip the other products.
sudo debconf-set-selections <<< 'mysql-apt-config mysql-apt-config/select-router select none'
sudo debconf-set-selections <<< 'mysql-apt-config mysql-apt-config/select-connector-python select none'
sudo debconf-set-selections <<< 'mysql-apt-config mysql-apt-config/select-workbench select none'
sudo debconf-set-selections <<< 'mysql-apt-config mysql-apt-config/select-server select mysql-5.7'
sudo debconf-set-selections <<< 'mysql-apt-config mysql-apt-config/select-mysql-utilities select mysql-utilities-1.5'
sudo DEBIAN_FRONTEND=noninteractive dpkg -i /tmp/mysql-apt-config_0.5.3-1_all.deb
sudo apt-get update
# Pre-seed the server's root password so the install is non-interactive too.
sudo debconf-set-selections <<< "mysql-community-server mysql-community-server/root-pass password secret"
sudo debconf-set-selections <<< "mysql-community-server mysql-community-server/re-root-pass password secret"
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y mysql-server
# Configure MySQL default charset to real utf8
sudo sed -i '/\[client\]/a default-character-set = utf8mb4' /etc/mysql/my.cnf
sudo sed -i '/\[mysqld\]/a character-set-client-handshake = FALSE' /etc/mysql/my.cnf
sudo sed -i '/\[mysqld\]/a character-set-server = utf8mb4' /etc/mysql/my.cnf
sudo sed -i '/\[mysqld\]/a collation-server = utf8mb4_unicode_ci' /etc/mysql/my.cnf
# Configure MySQL Password Lifetime
sudo sed -i '/\[mysqld\]/a default_password_lifetime = 0' /etc/mysql/my.cnf
# Allow remote connections to MySQL server
sudo sed -i "s/[# ]*bind-address\([[:space:]]*\)=\([[:space:]]*\).*/bind-address = 0.0.0.0/g" /etc/mysql/my.cnf
# Grant remote access to root and create the vagrant user/database.
mysql --user="root" --password="secret" -e "GRANT ALL ON *.* TO root@'0.0.0.0' IDENTIFIED BY 'secret' WITH GRANT OPTION;"
mysql --user="root" --password="secret" -e "CREATE USER 'vagrant'@'0.0.0.0' IDENTIFIED BY 'secret';"
mysql --user="root" --password="secret" -e "GRANT ALL ON *.* TO 'vagrant'@'0.0.0.0' IDENTIFIED BY 'secret' WITH GRANT OPTION;"
mysql --user="root" --password="secret" -e "GRANT ALL ON *.* TO 'vagrant'@'%' IDENTIFIED BY 'secret' WITH GRANT OPTION;"
mysql --user="root" --password="secret" -e "FLUSH PRIVILEGES;"
mysql --user="root" --password="secret" -e "CREATE DATABASE vagrant;"
# Loads timezone tables
mysql_tzinfo_to_sql /usr/share/zoneinfo | mysql --user=root --password=secret --force mysql
# Retart MySQL server to apply new configuration
sudo service mysql restart
|
adiachenko/catchy_dev
|
.provision/1_mysql.sh
|
Shell
|
mit
| 2,417 |
#!/bin/bash
###############################################################################
# Copyright (c) 2000-2017 Ericsson Telecom AB
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Balasko, Jeno
# Kovacs, Ferenc
#
###############################################################################
# End-of-run hook: prints start/stop markers on stdout and records its first
# two arguments (no trailing newline) in end_script.out.
# Bug fix: the shebang must be the first line of the file to take effect;
# it previously appeared after the license header.
echo "End Start!"
echo -n "$1 $2" >end_script.out
echo "End Stop!"
|
BotondBaranyi/titan.core
|
function_test/Config_Parser/end_script.sh
|
Shell
|
epl-1.0
| 585 |
#!/bin/bash
echo "------------------------------------------"
echo "COM.INST.CODECOMMENT"
echo "Fichier OK de TU"
echo "------------------------------------------"
# Unit-test fixture: this script is expected to raise no violation for the
# COM.INST.CodeComment rule of the i-Code CNES analyzer.
# This script works on SunOS, Solaris, HP-UX and Linux (bash or ksh).
# It displays the size of the file passed as its first argument.
#
# text file
case `uname` in
SunOS)
version=`uname -r | cut -d. -f1`
if [ $version -eq 5 ]
then
echo "Solaris 2"
ls -l $1 | awk -e '{print $5}';
else
echo "SunOS"
ls -l $1 | awk -e '{print $4}';
fi
;;
HP-UX|Linux)
echo "Linux"
ls -l $1 | awk '{print $5}';
;;
*) echo Systeme `uname` non reconnu.
exit 1;
;;
esac
|
dupuisa/i-CodeCNES
|
shell-rules/src/test/resources/COM/INST/CodeComment/noError.sh
|
Shell
|
epl-1.0
| 748 |
#!/usr/bin/perl
# main.command
# AutoExifMover
# Created by Pierre Andrews on 01/07/2007.
# Copyright 2007-2008 Pierre Andrews. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

# Reads file paths on stdin (one per line) and moves/copies each file to a
# destination path built from its EXIF date and the ':Tag:' placeholders in
# the pathPattern environment variable.
# Environment:
#   pathPattern   - pattern with strftime %-codes and :TagName: placeholders
#   directoryPath - destination root (defaults to the file's own directory)
#   test          - if set, only print the computed paths (dry run)
#   overwrite     - if set, allow clobbering existing destination files
#   action        - 1 to move, anything else to copy

# add our 'lib' directory to the include list BEFORE 'use Image::ExifTool'
my $exeDir;
BEGIN {
    # get exe directory
    $exeDir = ($0 =~ /(.*)[\\\/]/) ? $1 : '.';
    # add lib directory at start of include path
    unshift @INC, "$exeDir/lib";
}
use Image::ExifTool;
use File::Path;
use File::Basename;
use File::Copy;

#my $homedir=`ksh -c "(cd ~ 2>/dev/null && /bin/pwd)"`;
#chomp($homedir);
$homedir = $ENV{'HOME'};

my $exifTool = new Image::ExifTool;

# DateFormat expands the %-codes of the pattern when a date tag is read.
my $pattern = $ENV{'pathPattern'};
if(!$pattern) {
    $pattern = "%Y_%m_%d/";
}
$exifTool->Options(DateFormat => $pattern);

while(<>) {
    chomp;
    if(-e $_) {
        my $file = $_;
        my $name;
        my $dir;
        my $suffix;
        my $with_basename = 0;
        ($name,$dir,$suffix) = fileparse($file,qr/\.[^.]*$/);

        # Destination root; falls back to the file's own directory.
        my $destPath = $ENV{'directoryPath'};
        if(!$destPath) { $destPath = $dir; }

        # Prefer the original capture date, fall back to the file mtime.
        my $info = $exifTool->ImageInfo($file, 'DateTimeOriginal');
        my $path = $$info{'DateTimeOriginal'};
        if(!$path) {
            $info = $exifTool->ImageInfo($file, 'FileModifyDate');
            $path = $$info{'FileModifyDate'};
        }
        # A pattern without %-codes needs no date at all.
        if(!$path && $pattern !~ /%[A-Za-z]/) {
            $path = $pattern;
        }
        if($path) {
            # Substitute each :Tag: placeholder with the tag's value.
            while($path =~ /:([a-zA-Z]+):/g) {
                $label = $1;
                if($label =~ /basename/i) {
                    # Bug fix: was the bareword 'true' (stringified under
                    # no-strict); use a plain numeric flag.
                    $with_basename = 1;
                    $path =~ s/:basename:/$name/g;
                } elsif($label =~ /ext/i) {
                    $path =~ s/:ext:/$suffix/g;
                } else {
                    my $info = $exifTool->ImageInfo($_, "$label");
                    if($$info{"$label"}) {
                        my $value = $$info{"$label"};
                        $value =~ s/^\s+//;
                        $value =~ s/\s+$//;
                        $value =~ s/\//_/;
                        chomp($value);
                        $value =~ s/ /_/g;
                        $path =~ s/:$label:/$value/g;
                    } else {
                        $path =~ s/:$label://g;
                    }
                }
            }
            # Sanitize, anchor under the destination root, expand '~'.
            $path =~ s/[^A-Za-z0-9_\/.\-~]/_/g;
            $path = $destPath.'/'.$path;
            $path =~ s/^~/$homedir/;

            ($new_name,$new_dir,$new_suffix) = fileparse($path,qr/\.[^.]*$/);
            if($new_name && !$with_basename) {
                $path = $new_dir.'/'.$new_name.$new_suffix;
            }
            if(!$new_name) {
                $path .= $name.$suffix;
                $new_name = $name;
                $new_suffix = $suffix;
            }
            # Keep the original extension when the pattern produced none or a
            # different one.  Bug fix: the original used the numeric
            # comparison $new_suffix!=$suffix, which evaluates strings like
            # ".jpg" as 0 == 0 and therefore never fired; 'ne' is the string
            # inequality that was clearly intended.
            if(!$new_suffix || $new_suffix ne $suffix) {
                $path .= $suffix;
            }

            if(!$ENV{'test'}) { mkpath($new_dir); }

            if(!$ENV{'overwrite'}) {
                # Avoid clobbering: insert a counter and bump it until the
                # destination name is free.
                if(-e $path) {
                    if($path !~ /:cnt:/i) {
                        $path =~ s/(\.[^.]*)$/_:cnt:$1/;
                    }
                    my $local_cnt = 1;
                    $new_path = $path;
                    $new_path =~ s/:cnt:/$local_cnt/g;
                    while(-e $new_path) {
                        $local_cnt++;
                        $new_path = $path;
                        $new_path =~ s/:cnt:/$local_cnt/g;
                    }
                    $path = $new_path;
                }
                $path =~ s/_+/_/g;
                $path =~ s/_:cnt://g;
            } else {
                $path =~ s/:cnt://g;
            }

            if(!$ENV{'test'}) {
                if($ENV{'action'} == 1) {
                    move($file,$path);
                } else {
                    copy($file,$path);
                }
            }
            print $path."\n";
        }
    }
}
|
Mortimerp9/MetaDataMover
|
main.command
|
Shell
|
gpl-2.0
| 3,669 |
#!/bin/sh
# Launch OPKManager under gdb, resolving its shared libraries from the
# current directory.
env LD_LIBRARY_PATH=. gdb ./OPKManager
|
theZiz/OPKManager
|
OPKManager_debug.sh
|
Shell
|
gpl-2.0
| 45 |
#!/bin/sh
#
# Run "bfast match" over the four (colour-space, corner-case) combinations
# defined in test.definitions.sh and fail fast if any invocation errors.
. test.definitions.sh

echo " Finding matches.";

for SPACE in 0 1
do
	for CORNER_CASE in 0 1
	do
		# Encode the pair as a single index: NUM = CORNER_CASE*2 + SPACE.
		# (POSIX arithmetic expansion replaces the two legacy `expr` calls.)
		NUM=$((CORNER_CASE * 2 + SPACE));
		case $NUM in
			0) OUTPUT_ID=$OUTPUT_ID_NT;
				REF_ID=$OUTPUT_ID;
				;;
			1) OUTPUT_ID=$OUTPUT_ID_CS;
				REF_ID=$OUTPUT_ID;
				;;
			2) OUTPUT_ID=$OUTPUT_ID_CC_NT;
				REF_ID=$REF_ID_CC;
				;;
			3) OUTPUT_ID=$OUTPUT_ID_CC_CS;
				REF_ID=$REF_ID_CC;
				;;
			*)
				# Bug fix: the original fallback label was "default)", which
				# matches only the literal string "default" and never fired.
				exit 1;
		esac
		echo " Testing -A "$SPACE "CC="$CORNER_CASE;
		RG_FASTA=$OUTPUT_DIR$REF_ID".fa";
		READS=$OUTPUT_DIR"reads.$OUTPUT_ID.fastq";
		# Find matches
		CMD="${CMD_PREFIX}bfast match -f $RG_FASTA -r $READS -A $SPACE -n $NUM_THREADS -T $TMP_DIR > ${OUTPUT_DIR}bfast.matches.file.$OUTPUT_ID.bmf";
		eval $CMD 2> /dev/null;
		# Get return code
		if [ "$?" -ne "0" ]; then
			# Run again without piping anything so the error is visible.
			echo $CMD;
			eval $CMD;
			exit 1
		fi
	done
done

# Test passed!
echo " Matches found.";
exit 0
|
nh13/BFAST
|
tests/test.match.sh
|
Shell
|
gpl-2.0
| 982 |
#!/bin/bash
# Revoke the allow-all ingress rule (all protocols, all ports, 0.0.0.0/0)
# from every security group tagged close-allports-time=08-00 and
# bash-profile=ad, logging each attempt.
# export AWS_ACCESS_KEY="Your-Access-Key"
# export AWS_SECRET_KEY="Your-Secret-Key"

today=$(date +"%d-%m-%Y","%T")
logfile="/awslog/ec2-access.log"
group_file="$HOME/tmp/disallowall_ad_info.txt"

mkdir -p "$HOME/tmp"

# Grab all Security Groups IDs for DISALLOW action and export the IDs to a text file.
# NOTE(review): stderr is merged into the data file (2>&1), so previously an
# AWS error message would have been iterated over as if it were group IDs.
sudo aws ec2 describe-security-groups --filters Name=tag:close-allports-time,Values=08-00 Name=tag:bash-profile,Values=ad --query SecurityGroups[].[GroupId] --output text > "$group_file" 2>&1

# Take list of changing security groups, one per line (avoids the original
# unquoted $(cat ...) word-splitting), skipping anything that does not look
# like a security-group ID.
while IFS= read -r group_id
do
    case $group_id in
        sg-*) ;;
        *) continue ;;
    esac
    # Change rules in security group
    sudo aws ec2 revoke-security-group-ingress --group-id "$group_id" --protocol all --port all --cidr 0.0.0.0/0
    # Put info into log file
    echo "Attempt $today disallow access to instances with attached group $group_id for all ports" >> "$logfile"
done < "$group_file"
|
STARTSPACE/aws-access-to-ec2-by-timetable
|
all/disallow-ad/all-disallow-ad-08.sh
|
Shell
|
gpl-2.0
| 817 |
#!/bin/sh
#
# automatic regression test for ffmpeg
#
#
#set -x
# Even in the 21st century some diffs do not support -u.
diff -u "$0" "$0" > /dev/null 2>&1
if [ $? -eq 0 ]; then
diff_cmd="diff -u"
else
diff_cmd="diff"
fi
diff -w "$0" "$0" > /dev/null 2>&1
if [ $? -eq 0 ]; then
diff_cmd="$diff_cmd -w"
fi
set -e
datadir="./tests/data"
logfile="$datadir/ffmpeg.regression"
outfile="$datadir/a-"
# tests to run
if [ "$1" = "mpeg4" ] ; then
do_mpeg4=y
elif [ "$1" = "mpeg" ] ; then
do_mpeg=y
do_mpeg2=y
elif [ "$1" = "ac3" ] ; then
do_ac3=y
elif [ "$1" = "huffyuv" ] ; then
do_huffyuv=y
elif [ "$1" = "mpeg2thread" ] ; then
do_mpeg2thread=y
elif [ "$1" = "snow" ] ; then
do_snow=y
elif [ "$1" = "snowll" ] ; then
do_snowll=y
elif [ "$1" = "libavtest" ] ; then
do_libavtest=y
logfile="$datadir/libav.regression"
outfile="$datadir/b-"
else
do_mpeg=y
do_mpeg2=y
do_mpeg2thread=y
do_msmpeg4v2=y
do_msmpeg4=y
do_wmv1=y
do_wmv2=y
do_h261=y
do_h263=y
do_h263p=y
do_mpeg4=y
do_mp4psp=y
do_huffyuv=y
do_mjpeg=y
do_ljpeg=y
do_jpegls=y
do_rv10=y
do_rv20=y
do_mp2=y
do_ac3=y
do_g726=y
do_adpcm_ima_wav=y
do_adpcm_ms=y
do_flac=y
do_wma=y
do_vorbis=y
do_rc=y
do_mpeg4adv=y
do_mpeg4thread=y
do_mpeg4nr=y
do_mpeg1b=y
do_asv1=y
do_asv2=y
do_flv=y
do_ffv1=y
do_error=y
do_svq1=y
do_snow=y
do_snowll=y
do_adpcm_yam=y
do_dv=y
do_dv50=y
do_flashsv=y
do_adpcm_swf=y
fi
# various files
ffmpeg="./ffmpeg_g"
tiny_psnr="tests/tiny_psnr"
reffile="$2"
benchfile="$datadir/ffmpeg.bench"
bench="$datadir/bench.tmp"
bench2="$datadir/bench2.tmp"
raw_src="$3/%02d.pgm"
raw_dst="$datadir/out.yuv"
raw_ref="$datadir/ref.yuv"
pcm_src="tests/asynth1.sw"
pcm_dst="$datadir/out.wav"
pcm_ref="$datadir/ref.wav"
if [ X"`echo | md5sum 2> /dev/null`" != X ]; then
do_md5sum() { md5sum -b $1; }
elif [ -x /sbin/md5 ]; then
do_md5sum() { /sbin/md5 -r $1 | sed 's# \**\./# *./#'; }
else
do_md5sum() { echo No md5sum program found; }
fi
# create the data directory if it does not exist
mkdir -p $datadir
FFMPEG_OPTS="-y -flags +bitexact -dct fastint -idct simple"
do_ffmpeg()
{
f="$1"
shift
echo $ffmpeg $FFMPEG_OPTS $*
$ffmpeg $FFMPEG_OPTS -benchmark $* > $bench 2> /tmp/ffmpeg$$
egrep -v "^(Stream|Press|Input|Output|frame| Stream| Duration|video:)" /tmp/ffmpeg$$ || true
rm -f /tmp/ffmpeg$$
do_md5sum $f >> $logfile
if [ $f = $raw_dst ] ; then
$tiny_psnr $f $raw_ref >> $logfile
elif [ $f = $pcm_dst ] ; then
$tiny_psnr $f $pcm_ref 2 >> $logfile
else
wc -c $f >> $logfile
fi
expr "`cat $bench`" : '.*utime=\(.*s\)' > $bench2
echo `cat $bench2` $f >> $benchfile
}
do_ffmpeg_nomd5()
{
f="$1"
shift
echo $ffmpeg $FFMPEG_OPTS $*
$ffmpeg $FFMPEG_OPTS -benchmark $* > $bench 2> /tmp/ffmpeg$$
egrep -v "^(Stream|Press|Input|Output|frame| Stream| Duration|video:)" /tmp/ffmpeg$$ || true
rm -f /tmp/ffmpeg$$
if [ $f = $raw_dst ] ; then
$tiny_psnr $f $raw_ref >> $logfile
elif [ $f = $pcm_dst ] ; then
$tiny_psnr $f $pcm_ref 2 >> $logfile
else
wc -c $f >> $logfile
fi
expr "`cat $bench`" : '.*utime=\(.*s\)' > $bench2
echo `cat $bench2` $f >> $benchfile
}
do_ffmpeg_crc()
{
f="$1"
shift
echo $ffmpeg $FFMPEG_OPTS $* -f crc $datadir/ffmpeg.crc
$ffmpeg $FFMPEG_OPTS $* -f crc $datadir/ffmpeg.crc > /tmp/ffmpeg$$ 2>&1
egrep -v "^(Stream|Press|Input|Output|frame| Stream| Duration|video:|ffmpeg version| configuration| built)" /tmp/ffmpeg$$ || true
rm -f /tmp/ffmpeg$$
echo "$f `cat $datadir/ffmpeg.crc`" >> $logfile
}
do_ffmpeg_nocheck()
{
f="$1"
shift
echo $ffmpeg $FFMPEG_OPTS $*
$ffmpeg $FFMPEG_OPTS -benchmark $* > $bench 2> /tmp/ffmpeg$$
egrep -v "^(Stream|Press|Input|Output|frame| Stream| Duration|video:)" /tmp/ffmpeg$$ || true
rm -f /tmp/ffmpeg$$
expr "`cat $bench`" : '.*utime=\(.*s\)' > $bench2
echo `cat $bench2` $f >> $benchfile
}
do_video_decoding()
{
do_ffmpeg $raw_dst -y $1 -i $file -f rawvideo $2 $raw_dst
}
do_video_encoding()
{
file=${outfile}$1
do_ffmpeg $file -y $2 -f $3 -i $raw_src $4 $file
}
do_audio_encoding()
{
file=${outfile}$1
do_ffmpeg $file -y -ab 128k -ac 2 -f s16le -i $pcm_src $3 $file
}
do_audio_decoding()
{
do_ffmpeg $pcm_dst -y -i $file -f wav $pcm_dst
}
do_libav()
{
file=${outfile}libav.$1
do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -f s16le -i $pcm_src $2 $file
do_ffmpeg_crc $file -i $file $3
}
do_streamed_images()
{
file=${outfile}libav.$1
do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -f image2pipe $file
do_ffmpeg_crc $file -f image2pipe -i $file
}
do_image_formats()
{
file=${outfile}libav%02d.$1
$ffmpeg -t 0.5 -y -qscale 10 -f pgmyuv -i $raw_src $2 $3 -flags +bitexact $file
do_ffmpeg_crc $file $3 -i $file
do_md5sum ${outfile}libav02.$1 >> $logfile
}
do_audio_only()
{
file=${outfile}libav.$1
do_ffmpeg $file -t 1 -y -qscale 10 -f s16le -i $pcm_src $file
do_ffmpeg_crc $file -i $file
}
echo "ffmpeg regression test" > $logfile
echo "ffmpeg benchmarks" > $benchfile
###################################
# generate reference for quality check
do_ffmpeg_nocheck $raw_ref -y -f pgmyuv -i $raw_src -an -f rawvideo $raw_ref
do_ffmpeg_nocheck $pcm_ref -y -ab 128k -ac 2 -ar 44100 -f s16le -i $pcm_src -f wav $pcm_ref
###################################
if [ -n "$do_mpeg" ] ; then
# mpeg1
do_video_encoding mpeg1.mpg "-qscale 10" pgmyuv "-f mpeg1video"
do_video_decoding
fi
###################################
if [ -n "$do_mpeg2" ] ; then
# mpeg2
do_video_encoding mpeg2.mpg "-qscale 10" pgmyuv "-vcodec mpeg2video -f mpeg1video"
do_video_decoding
# mpeg2 encoding intra vlc qprd
do_video_encoding mpeg2ivlc-qprd.mpg "-vb 500k -bf 2 -flags +trell+qprd+mv0 -flags2 +ivlc -cmp 2 -subcmp 2 -mbd rd" pgmyuv "-vcodec mpeg2video -f mpeg2video"
# mpeg2 decoding
do_video_decoding
# mpeg2
do_video_encoding mpeg2.mpg "-qscale 10" pgmyuv "-vcodec mpeg2video -idct int -dct int -f mpeg1video"
do_video_decoding "-idct int"
# mpeg2 encoding interlaced
do_video_encoding mpeg2i.mpg "-qscale 10" pgmyuv "-vcodec mpeg2video -f mpeg1video -flags +ildct+ilme"
# mpeg2 decoding
do_video_decoding
fi
###################################
if [ -n "$do_mpeg2thread" ] ; then
# mpeg2 encoding interlaced
do_video_encoding mpeg2thread.mpg "-qscale 10" pgmyuv "-vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 2"
# mpeg2 decoding
do_video_decoding
# mpeg2 encoding interlaced using intra vlc
do_video_encoding mpeg2threadivlc.mpg "-qscale 10" pgmyuv "-vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -flags2 +ivlc -threads 2"
# mpeg2 decoding
do_video_decoding
# mpeg2 encoding interlaced
file=${outfile}mpeg2reuse.mpg
do_ffmpeg $file -y -sameq -me_threshold 256 -mb_threshold 1024 -i ${outfile}mpeg2thread.mpg -vcodec mpeg2video -f mpeg1video -bf 2 -flags +ildct+ilme -threads 4 $file
# mpeg2 decoding
do_video_decoding
fi
###################################
if [ -n "$do_msmpeg4v2" ] ; then
# msmpeg4
do_video_encoding msmpeg4v2.avi "-qscale 10" pgmyuv "-an -vcodec msmpeg4v2"
do_video_decoding
fi
###################################
if [ -n "$do_msmpeg4" ] ; then
# msmpeg4
do_video_encoding msmpeg4.avi "-qscale 10" pgmyuv "-an -vcodec msmpeg4"
do_video_decoding
fi
###################################
if [ -n "$do_wmv1" ] ; then
# wmv1
do_video_encoding wmv1.avi "-qscale 10" pgmyuv "-an -vcodec wmv1"
do_video_decoding
fi
###################################
if [ -n "$do_wmv2" ] ; then
# wmv2
do_video_encoding wmv2.avi "-qscale 10" pgmyuv "-an -vcodec wmv2"
do_video_decoding
fi
###################################
if [ -n "$do_h261" ] ; then
# h261
do_video_encoding h261.avi "-qscale 11" pgmyuv "-s 352x288 -an -vcodec h261"
do_video_decoding
fi
###################################
if [ -n "$do_h263" ] ; then
# h263
do_video_encoding h263.avi "-qscale 10" pgmyuv "-s 352x288 -an -vcodec h263"
do_video_decoding
fi
###################################
if [ -n "$do_h263p" ] ; then
# h263p
do_video_encoding h263p.avi "-qscale 2 -flags +umv+aiv+aic" pgmyuv "-s 352x288 -an -vcodec h263p -ps 300"
do_video_decoding
fi
###################################
if [ -n "$do_mpeg4" ] ; then
# mpeg4
do_video_encoding odivx.mp4 "-flags +mv4 -mbd bits -qscale 10" pgmyuv "-an -vcodec mpeg4"
do_video_decoding
fi
###################################
if [ -n "$do_huffyuv" ] ; then
# huffyuv
do_video_encoding huffyuv.avi "" pgmyuv "-an -vcodec huffyuv -pix_fmt yuv422p"
do_video_decoding "" "-strict -2 -pix_fmt yuv420p"
fi
###################################
if [ -n "$do_rc" ] ; then
# mpeg4 rate control
do_video_encoding mpeg4-rc.avi "-b 400k -bf 2" pgmyuv "-an -vcodec mpeg4"
do_video_decoding
fi
###################################
if [ -n "$do_mpeg4adv" ] ; then
# mpeg4
do_video_encoding mpeg4-adv.avi "-qscale 9 -flags +mv4+part+aic+trell -mbd bits -ps 200" pgmyuv "-an -vcodec mpeg4"
do_video_decoding
# mpeg4
do_video_encoding mpeg4-qprd.avi "-b 450k -bf 2 -flags +mv4+trell+qprd+mv0 -cmp 2 -subcmp 2 -mbd rd" pgmyuv "-an -vcodec mpeg4"
do_video_decoding
# mpeg4
do_video_encoding mpeg4-adap.avi "-b 550k -bf 2 -flags +mv4+trell+mv0 -cmp 1 -subcmp 2 -mbd rd -scplx_mask 0.3" pgmyuv "-an -vcodec mpeg4"
do_video_decoding
# mpeg4
do_video_encoding mpeg4-Q.avi "-qscale 7 -flags +mv4+qpel -mbd 2 -bf 2 -cmp 1 -subcmp 2" pgmyuv "-an -vcodec mpeg4"
do_video_decoding
fi
###################################
if [ -n "$do_mpeg4thread" ] ; then
# mpeg4
do_video_encoding mpeg4-thread.avi "-b 500k -flags +mv4+part+aic+trell -mbd bits -ps 200 -bf 2" pgmyuv "-an -vcodec mpeg4 -threads 2"
do_video_decoding
fi
###################################
if [ -n "$do_mp4psp" ] ; then
# mp4 PSP style
do_video_encoding mpeg4-PSP.mp4 "-vb 768k -s 320x240" psp "-ar 24000 -ab 32k -i $raw_src"
fi
###################################
if [ -n "$do_error" ] ; then
# damaged mpeg4
do_video_encoding error-mpeg4-adv.avi "-qscale 7 -flags +mv4+part+aic -mbd rd -ps 250 -error 10" pgmyuv "-an -vcodec mpeg4"
do_video_decoding
fi
###################################
if [ -n "$do_mpeg4nr" ] ; then
# noise reduction
do_video_encoding mpeg4-nr.avi "-qscale 8 -flags +mv4 -mbd rd -nr 200" pgmyuv "-an -vcodec mpeg4"
do_video_decoding
fi
###################################
if [ -n "$do_mpeg1b" ] ; then
# mpeg1
do_video_encoding mpeg1b.mpg "-qscale 8 -bf 3 -ps 200" pgmyuv "-an -vcodec mpeg1video -f mpeg1video"
do_video_decoding
fi
###################################
if [ -n "$do_mjpeg" ] ; then
# mjpeg
do_video_encoding mjpeg.avi "-qscale 10" pgmyuv "-an -vcodec mjpeg -pix_fmt yuvj420p"
do_video_decoding "" "-pix_fmt yuv420p"
fi
###################################
if [ -n "$do_ljpeg" ] ; then
# ljpeg
do_video_encoding ljpeg.avi "" pgmyuv "-an -vcodec ljpeg -strict -1"
do_video_decoding
fi
###################################
if [ -n "$do_jpegls" ] ; then
# jpeg ls
do_video_encoding jpegls.avi "" pgmyuv "-an -vcodec jpegls -vtag MJPG"
do_video_decoding "" "-pix_fmt yuv420p"
fi
###################################
if [ -n "$do_rv10" ] ; then
# rv10 encoding
do_video_encoding rv10.rm "-qscale 10" pgmyuv "-an"
do_video_decoding
fi
###################################
if [ -n "$do_rv20" ] ; then
# rv20 encoding
do_video_encoding rv20.rm "-qscale 10" pgmyuv "-vcodec rv20 -an"
do_video_decoding
fi
###################################
if [ -n "$do_asv1" ] ; then
# asv1 encoding
do_video_encoding asv1.avi "-qscale 10" pgmyuv "-an -vcodec asv1"
do_video_decoding
fi
###################################
if [ -n "$do_asv2" ] ; then
# asv2 encoding
do_video_encoding asv2.avi "-qscale 10" pgmyuv "-an -vcodec asv2"
do_video_decoding
fi
###################################
if [ -n "$do_flv" ] ; then
# flv encoding
do_video_encoding flv.flv "-qscale 10" pgmyuv "-an -vcodec flv"
do_video_decoding
fi
###################################
if [ -n "$do_ffv1" ] ; then
# ffv1 encoding
do_video_encoding ffv1.avi "-strict -2" pgmyuv "-an -vcodec ffv1"
do_video_decoding
fi
###################################
if [ -n "$do_snow" ] ; then
# snow
do_video_encoding snow.avi "-strict -2" pgmyuv "-an -vcodec snow -qscale 2 -flags +qpel -me iter -dia_size 2 -cmp 12 -subcmp 12 -s 128x64"
do_video_decoding "" "-s 352x288"
fi
###################################
if [ -n "$do_snowll" ] ; then
# snow
do_video_encoding snow53.avi "-strict -2" pgmyuv "-an -vcodec snow -qscale .001 -pred 1 -flags +mv4+qpel"
do_video_decoding
fi
###################################
if [ -n "$do_dv" ] ; then
# dv
do_video_encoding dv.dv "-dct int" pgmyuv "-s pal -an"
do_video_decoding "" "-s cif"
fi
###################################
if [ -n "$do_dv50" ] ; then
# dv50
do_video_encoding dv.dv "-dct int" pgmyuv "-s pal -pix_fmt yuv422p -an"
do_video_decoding "" "-s cif -pix_fmt yuv420p"
fi
###################################
if [ -n "$do_svq1" ] ; then
# svq1
do_video_encoding svq1.mov "" pgmyuv "-an -vcodec svq1 -qscale 3 -pix_fmt yuv410p"
do_video_decoding "" "-pix_fmt yuv420p"
fi
###################################
if [ -n "$do_flashsv" ] ; then
# svq1
do_video_encoding flashsv.flv "" pgmyuv "-an -vcodec flashsv "
do_video_decoding "" "-pix_fmt yuv420p"
fi
###################################
if [ -n "$do_mp2" ] ; then
# mp2
do_audio_encoding mp2.mp2 "-ar 44100"
do_audio_decoding
$tiny_psnr $pcm_dst $pcm_ref 2 1924 >> $logfile
fi
###################################
if [ -n "$do_ac3" ] ; then
# ac3
do_audio_encoding ac3.rm "" -vn
#do_audio_decoding
fi
###################################
if [ -n "$do_g726" ] ; then
# g726
do_audio_encoding g726.wav "-ar 44100" "-ab 32k -ac 1 -ar 8000 -acodec g726"
do_audio_decoding
fi
###################################
if [ -n "$do_adpcm_ima_wav" ] ; then
# adpcm ima
do_audio_encoding adpcm_ima.wav "-ar 44100" "-acodec adpcm_ima_wav"
do_audio_decoding
fi
###################################
if [ -n "$do_adpcm_ms" ] ; then
# adpcm ms
do_audio_encoding adpcm_ms.wav "-ar 44100" "-acodec adpcm_ms"
do_audio_decoding
fi
###################################
if [ -n "$do_adpcm_yam" ] ; then
# adpcm yamaha
do_audio_encoding adpcm_yam.wav "-ar 44100" "-acodec adpcm_yamaha"
do_audio_decoding
fi
###################################
if [ -n "$do_adpcm_swf" ] ; then
# adpcm adpcm_swf
do_audio_encoding adpcm_swf.flv "-ar 44100" "-acodec adpcm_swf"
do_audio_decoding
fi
###################################
if [ -n "$do_flac" ] ; then
# flac
do_audio_encoding flac.flac "-ar 44100" "-acodec flac -compression_level 2"
do_audio_decoding
fi
###################################
if [ -n "$do_wma" ] ; then
# wmav1
do_audio_encoding wmav1.asf "-ar 44100" "-acodec wmav1"
do_ffmpeg_nomd5 $pcm_dst -y -i $file -f wav $pcm_dst
$tiny_psnr $pcm_dst $pcm_ref 2 8192 >> $logfile
# wmav2
do_audio_encoding wmav2.asf "-ar 44100" "-acodec wmav2"
do_ffmpeg_nomd5 $pcm_dst -y -i $file -f wav $pcm_dst
$tiny_psnr $pcm_dst $pcm_ref 2 8192 >> $logfile
fi
###################################
#if [ -n "$do_vorbis" ] ; then
# vorbis
#disabled because it is broken
#do_audio_encoding vorbis.asf "-ar 44100" "-acodec vorbis"
#do_audio_decoding
#fi
###################################
# libavformat testing
###################################
if [ -n "$do_libavtest" ] ; then
# avi
do_libav avi
# asf
do_libav asf "-acodec mp2" "-r 25"
# rm
file=${outfile}libav.rm
do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -f s16le -i $pcm_src $file
# broken
#do_ffmpeg_crc $file -i $file
# mpegps
do_libav mpg
# mpegts
do_libav ts
# swf
do_libav swf -an
# ffm
do_libav ffm
# flv
do_libav flv -an
# mov
do_libav mov "-acodec pcm_alaw"
# nut
#do_libav nut "-acodec mp2"
# dv
do_libav dv "-ar 48000 -r 25 -s pal -ac 2"
# gxf
do_libav gxf "-ar 48000 -r 25 -s pal -ac 1"
# nut
do_libav nut "-acodec mp2"
# mkv
do_libav mkv
####################
# streamed images
# mjpeg
#file=${outfile}libav.mjpeg
#do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src $file
#do_ffmpeg_crc $file -i $file
# pbmpipe
do_streamed_images pbm
# pgmpipe
do_streamed_images pgm
# ppmpipe
do_streamed_images ppm
# gif
file=${outfile}libav.gif
do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -pix_fmt rgb24 $file
#do_ffmpeg_crc $file -i $file
# yuv4mpeg
file=${outfile}libav.y4m
do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src $file
#do_ffmpeg_crc $file -i $file
####################
# image formats
# pgm
do_image_formats pgm
# ppm
do_image_formats ppm
# bmp
do_image_formats bmp
# tga
do_image_formats tga
# tiff
do_image_formats tiff "-pix_fmt rgb24"
# sgi
do_image_formats sgi
# jpeg
do_image_formats jpg "-flags +bitexact -dct fastint -idct simple -pix_fmt yuvj420p" "-f image2"
####################
# audio only
# wav
do_audio_only wav
# alaw
do_audio_only al
# mulaw
do_audio_only ul
# au
do_audio_only au
# mmf
do_audio_only mmf
# aiff
do_audio_only aif
# voc
do_audio_only voc
# ogg
do_audio_only ogg
####################
# pix_fmt conversions
conversions="yuv420p yuv422p yuv444p yuyv422 yuv410p yuv411p yuvj420p \
yuvj422p yuvj444p rgb24 bgr24 rgb32 rgb565 rgb555 gray monow \
monob pal8 yuv440p yuvj440p"
for pix_fmt in $conversions ; do
file=${outfile}libav-${pix_fmt}.yuv
do_ffmpeg_nocheck $file -r 1 -t 1 -y -f pgmyuv -i $raw_src \
-f rawvideo -s 352x288 -pix_fmt $pix_fmt $raw_dst
do_ffmpeg $file -f rawvideo -s 352x288 -pix_fmt $pix_fmt -i $raw_dst \
-f rawvideo -s 352x288 -pix_fmt yuv444p $file
done
fi # [ -n "$do_libavtest" ]
if $diff_cmd "$logfile" "$reffile" ; then
echo
echo Regression test succeeded.
exit 0
else
echo
echo Regression test: Error.
exit 1
fi
|
paulcbetts/yikes
|
ext/ffmpeg/tests/regression.sh
|
Shell
|
gpl-2.0
| 17,918 |
#!/bin/sh
# Regenerate the gettext template from the Glade UI file and merge it
# into each translation catalog.
#
# Fixes over the original:
#  - stop at the first failed command (set -e) so a failed xgettext or
#    msgmerge can no longer overwrite a .po file with a truncated result
#  - use mktemp instead of predictable /tmp/<locale>.po names
#  - loop over the locales instead of copy-pasting the merge commands
set -e

pot=po/gnome-modem-manager.pot
xgettext --language=Glade -o "$pot" gnome-modem-manager.ui

for locale in ru_RU ua_UA; do
    po=po/gnome-modem-manager-$locale.po
    tmp=$(mktemp)
    # Merge the existing translations with the freshly extracted template,
    # then atomically replace the catalog.
    msgmerge "$po" "$pot" > "$tmp"
    mv "$tmp" "$po"
done
|
KivApple/Gnome-Modem-Manager
|
update-locale.sh
|
Shell
|
gpl-2.0
| 355 |
#!/bin/sh
##
## Mount kernel filesystems and Create initial devices.
##
PATH=/usr/bin:/usr/sbin:/bin:/sbin

mount -t proc -o nodev,noexec,nosuid proc /proc

# Minimal static device nodes needed before anything else can log or read.
[ -e /dev/console ] || mknod -m 0600 /dev/console c 5 1
[ -e /dev/null ] || mknod -m 0666 /dev/null c 1 3

# Pull in cmd_run, mounttmpfs, log_*_msg helpers.
. /lib/onie/functions

# Set console logging to show KERN_NOTICE and above
echo "6 4 1 6" > /proc/sys/kernel/printk

##
## Mount kernel virtual file systems, ala debian init script of the
## same name. We use different options in some cases, so move the
## whole thing here to avoid re-running after the pivot.
##
mount_kernelfs()
{
    # keep /tmp, /var/tmp, /run, /run/lock in tmpfs
    tmpfs_size="10M"
    for d in run run/lock ; do
        cmd_run mkdir -p /$d
        mounttmpfs /$d "defaults,noatime,size=$tmpfs_size,mode=1777"
    done
    # On wheezy, if /var/run is not a link
    # fix it and make it a link to /run.
    if [ ! -L /var/run ] ; then
        rm -rf /var/run
        (cd /var && ln -s ../run run)
    fi
    for d in tmp var/tmp ; do
        cmd_run mkdir -p /$d
        mounttmpfs /$d "defaults,noatime,mode=1777"
    done
    cmd_run mount -o nodev,noexec,nosuid -t sysfs sysfs /sys || {
        log_failure_msg "Could not mount sysfs on /sys"
        /sbin/boot-failure 1
    }
    # take care of mountdevsubfs.sh duties also
    d=/run/shm
    if [ ! -d $d ] ; then
        cmd_run mkdir --mode=755 $d
    fi
    mounttmpfs $d "nosuid,nodev"
    TTYGRP=5
    TTYMODE=620
    d=/dev/pts
    if [ ! -d $d ] ; then
        cmd_run mkdir --mode=755 $d
    fi
    cmd_run mount -o "noexec,nosuid,gid=$TTYGRP,mode=$TTYMODE" -t devpts devpts $d || {
        log_failure_msg "Could not mount devpts on $d"
        /sbin/boot-failure 1
    }
}

log_begin_msg "Info: Mounting kernel filesystems"
mount_kernelfs
log_end_msg

# mtd devices
# Use the names found in /proc/mtd to create symlinks in /dev.
#   /dev/mtd-<NAME>
mtds=$(sed -e 's/://' -e 's/"//g' /proc/mtd | tail -n +2 | awk '{ print $1 ":" $4 }')
for x in $mtds ; do
    dev=/dev/${x%:*}
    name=${x#*:}
    if [ -n "$dev" ] ; then
        [ -c $dev ] || {
            log_failure_msg "$dev is not a valid MTD device."
            /sbin/boot-failure 1
        }
        ln -sf $dev /dev/mtd-$name
    fi
done

# create virtio block devices
# Use the devices found in /sys/block
#
# Fixed: the original used "2&> /dev/null", which the shell parses as a
# literal argument "2" followed by the "&>" redirect.  That both swallowed
# ls's stdout (so $vdevs was always empty) and made ls exit non-zero (no
# file named "2"), so this block never ran even when vd* devices existed.
vdevs=$(ls -d /sys/block/vd[a-z] 2> /dev/null) && {
    for d in $vdevs ; do
        dev=$(basename $d)
        major=$(sed -e 's/:.*$//' $d/dev)
        rm -f /dev/$dev
        mknod /dev/$dev b $major 0 || {
            log_failure_msg "Problems creating /dev/$dev block device."
            continue
        }
        # assumes whole-disk minor 0 and partition minors 1..8 for
        # virtio-blk -- TODO confirm against the kernel's minor allocation
        for minor in $(seq 8) ; do
            rm -f /dev/${dev}$minor
            mknod /dev/${dev}$minor b $major $minor || {
                log_failure_msg "Problems creating /dev/$dev block device."
                continue
            }
        done
    done
}

mkdir -p $ONIE_RUN_DIR
|
ahedlund/onie
|
rootconf/default/etc/init.d/makedev.sh
|
Shell
|
gpl-2.0
| 2,904 |
#!/bin/bash
# Build Subversion and its dependencies (APR, APR-util, serf) from
# source and install them under $PREFIX.

# Set package versions
SVN_VERSION=1.10.0
APR_VERSION=1.6.3
APR_UTIL_VERSION=1.6.1
SERF_VERSION=1.3.9
SCONS_LOCAL_VERSION=2.3.0

# Set install path
PREFIX=/pds/opt/subversion-$SVN_VERSION-testing

# Set file/directory names
SVN=subversion-$SVN_VERSION
APR=apr-$APR_VERSION
APR_UTIL=apr-util-$APR_UTIL_VERSION
SERF=serf-$SERF_VERSION
SCONS_LOCAL=scons-local-$SCONS_LOCAL_VERSION

# Set download links
SVN_LINK=https://archive.apache.org/dist/subversion/$SVN.tar.bz2
APR_LINK=https://archive.apache.org/dist/apr/$APR.tar.bz2
APR_UTIL_LINK=https://archive.apache.org/dist/apr/$APR_UTIL.tar.bz2
SERF_LINK=https://archive.apache.org/dist/serf/$SERF.tar.bz2
SCONS_LOCAL_LINK=http://prdownloads.sourceforge.net/scons/$SCONS_LOCAL.tar.gz

# Set auxiliary directories
BASEDIR=$(pwd)/temp-install-$SVN
BUILDDIR_BASE=$BASEDIR/build

# Set log file
LOG=$BASEDIR/install.log

# Set download command
DOWNLOAD="wget -nc"

# Auxiliary exit command in case of errors
die() {
  echo "error: $1" >&2
  # Exit with a failure status.  The original called plain `exit`, which
  # propagates the status of the preceding (successful) echo, i.e. 0 --
  # so fatal errors looked like success to any caller of this script.
  exit 1
}
# Install APR
INSTALLED_APR=0
install_apr() {
  # Out-of-tree build directory for APR.  (The original mistakenly used
  # $BUILDDIR_BASE.$APR_UTIL here, sharing apr-util's build tree.)
  BUILDDIR=$BUILDDIR_BASE.$APR
  echo "Installing $APR..."
  cd $BASEDIR
  echo " * downloading..."
  test -f $APR.tar.bz2 || $DOWNLOAD $APR_LINK >>$LOG 2>&1 || die "downloading $APR failed"
  echo " * unpacking..."
  test -d $APR || tar xf $APR.tar.bz2 >>$LOG 2>&1 || die "unpacking $APR failed"
  # Always configure/build in a pristine build directory.
  test -d $BUILDDIR && rm -rf $BUILDDIR
  mkdir $BUILDDIR
  cd $BUILDDIR
  echo " * configuring..."
  $BASEDIR/$APR/configure --prefix=$PREFIX >>$LOG 2>&1 || die "configuring $APR failed"
  echo " * building..."
  make -j 10 >>$LOG 2>&1 || die "building $APR failed"
  echo " * installing..."
  make install >>$LOG 2>&1 || die "installing $APR failed"
  echo "done."
  echo
  INSTALLED_APR=1
}
# Install APR-UTIL
INSTALLED_APR_UTIL=0
install_apr_util() {
  BUILDDIR=$BUILDDIR_BASE.$APR_UTIL
  echo "Installing $APR_UTIL..."
  cd $BASEDIR
  echo " * downloading..."
  test -f $APR_UTIL.tar.bz2 || $DOWNLOAD $APR_UTIL_LINK >>$LOG 2>&1 || die "downloading $APR_UTIL failed"
  echo " * unpacking..."
  test -d $APR_UTIL || tar xf $APR_UTIL.tar.bz2 >>$LOG 2>&1 || die "unpacking $APR_UTIL failed"
  # Always configure/build in a pristine build directory.
  test -d $BUILDDIR && rm -rf $BUILDDIR
  mkdir $BUILDDIR
  cd $BUILDDIR
  echo " * configuring..."
  # Embed the install prefix's lib dirs in the rpath so the installed
  # binaries find the freshly built APR without LD_LIBRARY_PATH.
  LDFLAGS="-Wl,-rpath,$PREFIX/lib -Wl,-rpath,$PREFIX/lib64" \
  $BASEDIR/$APR_UTIL/configure \
    --prefix=$PREFIX --with-apr=$PREFIX >>$LOG 2>&1 || die "configuring $APR_UTIL failed"
  echo " * building..."
  # (fixed: the original had a stray "0" glued onto this error message)
  make -j 10 >>$LOG 2>&1 || die "building $APR_UTIL failed"
  echo " * installing..."
  # (fixed: the original had a stray "l" glued onto this error message)
  make install >>$LOG 2>&1 || die "installing $APR_UTIL failed"
  echo "done."
  echo
  INSTALLED_APR_UTIL=1
}
# Install SERF
INSTALLED_SERF=0
install_serf() {
  echo "Installing $SERF..."
  cd $BASEDIR
  echo " * downloading..."
  # (fixed: the original error message said "configuring" here)
  test -f $SERF.tar.bz2 || $DOWNLOAD $SERF_LINK >>$LOG 2>&1 || die "downloading $SERF failed"
  echo " * unpacking..."
  # serf is built in-tree by scons, so always start from a fresh unpack.
  test -d $SERF && rm -rf $SERF
  tar xf $SERF.tar.bz2 >>$LOG 2>&1 || die "unpacking $SERF failed"
  cd $SERF
  echo " * setting up $SCONS_LOCAL..."
  # (fixed: the original referenced the undefined variable $SCONS in
  # these two error messages)
  $DOWNLOAD $SCONS_LOCAL_LINK >>$LOG 2>&1 || die "setting up $SCONS_LOCAL failed"
  tar xf $SCONS_LOCAL.tar.gz >>$LOG 2>&1 || die "setting up $SCONS_LOCAL failed"
  # -f makes the symlink creation idempotent on reruns
  ln -sf scons.py scons
  echo " * configuring..."
  ./scons \
    LINKFLAGS="-Wl,-rpath,$PREFIX/lib -Wl,-rpath,$PREFIX/lib64" \
    APR=$PREFIX APU=$PREFIX PREFIX=$PREFIX >>$LOG 2>&1 || die "configuring $SERF failed"
  echo " * building & installing..."
  ./scons install >>$LOG 2>&1 || die "building & installing $SERF failed"
  echo "done."
  echo
  INSTALLED_SERF=1
}
# Install SVN
INSTALLED_SVN=0
install_svn() {
  echo "Installing $SVN..."
  BUILDDIR=$BUILDDIR_BASE.$SVN
  cd $BASEDIR
  echo " * downloading..."
  test -f $SVN.tar.bz2 || $DOWNLOAD $SVN_LINK >>$LOG 2>&1 || die "downloading $SVN failed"
  echo " * unpacking..."
  test -d $SVN || tar xf $SVN.tar.bz2 >>$LOG 2>&1 || die "unpacking $SVN failed"
  # Always configure/build in a pristine build directory.  (Fixed: the
  # original used `test -d $BUILDDIR || rm -rf $BUILDDIR`, which never
  # removed an existing stale build dir -- the opposite of the `&&`
  # logic used by the other install_* functions -- and then mkdir failed.)
  test -d $BUILDDIR && rm -rf $BUILDDIR
  mkdir $BUILDDIR
  cd $BUILDDIR
  echo " * configuring..."
  LDFLAGS="-Wl,-rpath,$PREFIX/lib -Wl,-rpath,$PREFIX/lib64" \
  $BASEDIR/$SVN/configure --prefix=$PREFIX --with-lz4=internal --with-utf8proc=internal \
    --with-apr=$PREFIX \
    --with-apr-util=$PREFIX \
    --with-serf=$PREFIX >>$LOG 2>&1 || die "configuring $SVN failed"
  echo " * building..."
  make -j 10 >>$LOG 2>&1 || die "building $SVN failed"
  echo " * installing..."
  make install >>$LOG 2>&1 || die "installing $SVN failed"
  echo "done."
  echo
  INSTALLED_SVN=1
}
# Print usage information and terminate successfully.
usage() {
  script_name=$(basename "$0")
  printf '%s\n' \
    "$script_name [PACKAGE [PACKAGE...]]" \
    "" \
    "PACKAGE may be one of 'apr', 'apr-util', 'serf', 'svn'." \
    "If omitted, all packages are installed."
  exit 0
}

# Show help when any argument is -h or --help.
for arg in "$@"; do
  case "$arg" in
    -h|--help) usage ;;
  esac
done
# Info on install prefix
echo "Subversion $SVN_VERSION and its dependencies will be installed to '$PREFIX'."
echo
# Create temporary directory
echo "Creating temporary directory '$BASEDIR'..."
test -d $BASEDIR || mkdir $BASEDIR
# Truncate/initialize the shared log file that all install steps append to.
>$LOG
echo
# Check arguments
if [ $# -gt 0 ]; then
# If one or more packages are requested, only install those
# (any unrecognized argument prints usage and exits)
for package in "$@"; do
case $package in
apr) install_apr;;
apr-util) install_apr_util;;
serf) install_serf;;
svn) install_svn;;
*) usage;;
esac
done
else
# Otherwise install all packages
install_apr
install_apr_util
install_serf
install_svn
fi
# Delete temporary install directory
echo "Removing temporary directory '$BASEDIR'..."
rm -rf $BASEDIR
echo "done."
echo
# Install overview: only the INSTALLED_* flags set by the install_*
# functions that actually ran are reported.
echo "The following packages have been installed to $PREFIX:"
[ $INSTALLED_APR -eq 1 ] && echo " * $APR"
[ $INSTALLED_APR_UTIL -eq 1 ] && echo " * $APR_UTIL"
[ $INSTALLED_SERF -eq 1 ] && echo " * $SERF"
# NOTE(review): if this last check is false the script exits non-zero
# because it is the final command -- consider adding an explicit 'exit 0'.
[ $INSTALLED_SVN -eq 1 ] && echo " * $SVN"
|
sloede/cobuti
|
configure/subversion/install.sh
|
Shell
|
gpl-2.0
| 5,671 |
#!/bin/sh
# This script splits out all the frameworks from kdelibs into separate repositories
# and puts them into ../frameworks/.
#
# For each framework, first it creates an empty repository and it imports the
# current code, into the original subdirectory. For example, kconfig will be a
# repository containing a tier1/kconfig/ directory. Then, in a second commit,
# the code is moved to the root directory of the repository.
#
# Doing the move in two steps like this lets git follow the history better.
# When the old kdelibs history is grafted on the new repositories, the history
# will contain a commit that deletes everything except that framework, and then
# another commit moving the framework to the root.
#
origproject=kdelibs
origbranch=frameworks
origsha1=$(git rev-parse HEAD)

if [ ! -d tier1 ]; then
    # Fixed: the original quoted the entire line *including* the `echo`
    # word, so the shell tried to execute a command literally named
    # "echo Run this script ..." instead of printing the message.
    echo "Run this script from the toplevel of the monolithic repository, there must be a tier1 subdirectory"
    exit 1
fi

dest=../frameworks
mkdir -p $dest
here=$PWD

for dir in tier1/* tier2/* tier3/* tier4/*; do
    cd $here
    # skip plain files (e.g. CMakeLists.txt) inside the tier directories
    if [ -f $dir ]; then
        continue;
    fi
    frameworkname=$(basename $dir)
    frameworkdest=$dest/$frameworkname
    rm -rf $frameworkdest
    # eg. create ../frameworks/kjs/tier1
    mkdir -p $(dirname $frameworkdest/$dir)
    # eg. copy tier1/kjs to ../frameworks/kjs/tier1/kjs
    cp -a $dir $frameworkdest/$dir/
    cd $frameworkdest
    git init
    git add .
    git commit -q -F - <<EOF
Initial import from the monolithic $origproject.
This is the beginning of revision history for this module. If you
want to look at revision history older than this, please refer to the
techbase wiki for how to use Git history grafting. At the time of
writing, this wiki is located here:
http://community.kde.org/Frameworks/GitOldHistory
If you have already performed the grafting and you don't see any
history beyond this commit, try running "git log" with the "--follow"
argument.
Branched from the monolithic repo, $origproject $origbranch branch, at commit
$origsha1
EOF
    # eg. moves tier1/kconfig/* to .
    git mv $dir/* .
    git commit -q -m "Move $frameworkname code to the root directory."
    echo "$frameworkdest done."
done
|
TheTypoMaster/kde-dev-scripts
|
frameworks/split_out_frameworks.sh
|
Shell
|
gpl-2.0
| 2,168 |
#!/bin/sh
# (c) Copyright 2009 - 2010 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#-----------------------------------------------------------------------------
# Script to synthesize and implement the Coregen FIFO Generator
#-----------------------------------------------------------------------------
# Start from a clean results directory so stale outputs from a previous
# run cannot leak into this one.
rm -rf results
mkdir results
# Abort if the directory cannot be entered; otherwise the copy and the
# planAhead run would execute in the wrong directory.
cd results || exit 1
cp ../../../k7_prime_fifo_plain.ngc .
planAhead -mode batch -source ../planAhead_ise.tcl
|
v3best/R7Lite
|
R7Lite_PCIE/fpga_code/r7lite_DMA/ipcore_dir/k7_prime_fifo_plain/implement/planAhead_ise.sh
|
Shell
|
gpl-2.0
| 2,536 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.