#!/bin/bash
# ------------------------------------------------------------------------
#
# Copyright 2016 WSO2, Inc. (http://wso2.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
prgdir=$(dirname "$0")
script_path=$(cd "$prgdir"; pwd)
common_folder=$(cd "${script_path}/../common/scripts/"; pwd)
product_profiles=(store publisher default)
full_deployment=false
while getopts :f FLAG; do
case $FLAG in
f)
full_deployment=true
;;
esac
done
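# Usage (illustrative, based on the getopts loop above):
#   bash undeploy.sh       # undeploy the store, publisher and default profiles
#   bash undeploy.sh -f    # additionally undeploy the shared MySQL services/RCs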
if [[ ${#product_profiles[@]} -gt 0 ]]; then
for profile in "${product_profiles[@]}"; do
bash "${common_folder}/undeploy.sh" "$profile"
done
else
bash "${common_folder}/undeploy.sh"
fi
sleep 5
if [[ "${full_deployment}" == true ]]; then
echo "Undeploying MySQL Services and RCs for Conf and Gov remote mounting..."
bash "${script_path}/../common/wso2-shared-dbs/undeploy.sh"
fi
# undeploy DB service, rc and pods
kubectl delete rc,services,pods -l name="mysql-esdb"
# --- wso2/kubernetes-artifacts : wso2es/undeploy.sh (Shell, apache-2.0, 1,534 bytes) ---
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tarball.
# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files, or the manifest files should not be salt-templated.
set -o errexit
set -o nounset
set -o pipefail
function create-dirs {
echo "Creating required directories"
mkdir -p /var/lib/kubelet
mkdir -p /etc/kubernetes/manifests
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
mkdir -p /var/lib/kube-proxy
fi
}
# Create directories referenced in the kube-controller-manager manifest for
# bindmounts. This is used under the rkt runtime to work around
# https://github.com/kubernetes/kubernetes/issues/26816
function create-kube-controller-manager-dirs {
mkdir -p /etc/srv/kubernetes /var/ssl /etc/{ssl,openssl,pki}
}
# Formats the given device ($1) if needed and mounts it at given mount point
# ($2).
function safe-format-and-mount() {
device=$1
mountpoint=$2
# Format only if the disk is not already formatted.
if ! tune2fs -l "${device}" ; then
echo "Formatting '${device}'"
mkfs.ext4 -F -E lazy_itable_init=0,lazy_journal_init=0,discard "${device}"
fi
mkdir -p "${mountpoint}"
echo "Mounting '${device}' at '${mountpoint}'"
mount -o discard,defaults "${device}" "${mountpoint}"
}
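# Example (hypothetical device and mount point, for illustration only):
#   safe-format-and-mount /dev/sdb /mnt/disks/ssd0
# formats /dev/sdb with ext4 only if tune2fs finds no existing filesystem on it,
# then mounts it with the discard option at /mnt/disks/ssd0.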
# Local ssds, if present, are mounted at /mnt/disks/ssdN.
function ensure-local-ssds() {
for ssd in /dev/disk/by-id/google-local-ssd-*; do
if [ -e "${ssd}" ]; then
ssdnum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
ssdmount="/mnt/disks/ssd${ssdnum}/"
mkdir -p "${ssdmount}"
safe-format-and-mount "${ssd}" "${ssdmount}"
echo "Mounted local SSD $ssd at ${ssdmount}"
chmod a+w "${ssdmount}"
else
echo "No local SSD disks found."
fi
done
}
# Finds the master PD device; returns it in MASTER_PD_DEVICE
function find-master-pd {
MASTER_PD_DEVICE=""
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
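# Illustration (assumed ls output, not taken from a real instance): if
#   ls -l /dev/disk/by-id/google-master-pd
# ends in "google-master-pd -> ../../sdb", then ${device_info##* } is
# "../../sdb" and MASTER_PD_DEVICE becomes /dev/disk/by-id/../../sdb,
# which resolves to /dev/sdb.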
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave a directory be if it already exists.
function mount-master-pd {
find-master-pd
if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
return
fi
echo "Mounting master-pd"
local -r pd_path="/dev/disk/by-id/google-master-pd"
local -r mount_point="/mnt/disks/master-pd"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
mkdir -p "${mount_point}"
safe-format-and-mount "${pd_path}" "${mount_point}"
echo "Mounted master-pd '${pd_path}' at '${mount_point}'"
# NOTE: These locations on the PD store persistent data, so to maintain
# upgradeability, these locations should not change. If they do, take care
# to maintain a migration path from these locations to whatever new
# locations.
# Contains all the data stored in etcd.
mkdir -m 700 -p "${mount_point}/var/etcd"
ln -s -f "${mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
mkdir -p "${mount_point}/srv/kubernetes"
ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
# Directory for kube-apiserver to store SSH key (if necessary).
mkdir -p "${mount_point}/srv/sshproxy"
ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy
if ! id etcd &>/dev/null; then
useradd -s /sbin/nologin -d /var/etcd etcd
fi
chown -R etcd "${mount_point}/var/etcd"
chgrp -R etcd "${mount_point}/var/etcd"
}
# replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
function replace_prefixed_line {
local -r file="${1:-}"
local -r prefix="${2:-}"
local -r suffix="${3:-}"
touch "${file}"
awk "substr(\$0,0,length(\"${prefix}\")) != \"${prefix}\" { print }" "${file}" > "${file}.filtered" && mv "${file}.filtered" "${file}"
echo "${prefix}${suffix}" >> "${file}"
}
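# Worked example (hypothetical token value): calling
#   replace_prefixed_line /etc/srv/kubernetes/known_tokens.csv "abc123," "admin,admin,system:masters"
# drops any existing line starting with "abc123," from the file and appends
#   abc123,admin,admin,system:masters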
# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.)
function create-master-auth {
echo "Creating master auth files"
local -r auth_dir="/etc/srv/kubernetes"
if [[ ! -e "${auth_dir}/ca.crt" && ! -z "${CA_CERT:-}" && ! -z "${MASTER_CERT:-}" && ! -z "${MASTER_KEY:-}" ]]; then
echo "${CA_CERT}" | base64 --decode > "${auth_dir}/ca.crt"
echo "${MASTER_CERT}" | base64 --decode > "${auth_dir}/server.cert"
echo "${MASTER_KEY}" | base64 --decode > "${auth_dir}/server.key"
fi
local -r basic_auth_csv="${auth_dir}/basic_auth.csv"
if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"
fi
local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters"
fi
if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
fi
if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
fi
if [[ -n "${KUBELET_TOKEN:-}" ]]; then
replace_prefixed_line "${known_tokens_csv}" "${KUBELET_TOKEN}," "kubelet,uid:kubelet,system:nodes"
fi
if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
fi
local use_cloud_config="false"
cat <<EOF >/etc/gce.conf
[global]
EOF
if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
cat <<EOF >>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
EOF
fi
if [[ -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
EOF
fi
if [[ -n "${PROJECT_ID:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
project-id = ${PROJECT_ID}
EOF
fi
if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
EOF
fi
if [[ -n "${NODE_NETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
network-name = ${NODE_NETWORK}
EOF
fi
if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
fi
if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
use_cloud_config="true"
if [[ -n "${NODE_TAGS:-}" ]]; then
local -r node_tags="${NODE_TAGS}"
else
local -r node_tags="${NODE_INSTANCE_PREFIX}"
fi
cat <<EOF >>/etc/gce.conf
node-tags = ${node_tags}
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
fi
if [[ -n "${MULTIZONE:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
fi
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
alpha-features = ${GCE_ALPHA_FEATURES}
EOF
fi
if [[ -n "${SECONDARY_RANGE_NAME:-}" ]]; then
use_cloud_config="true"
cat <<EOF >> /etc/gce.conf
secondary-range-name = ${SECONDARY_RANGE_NAME}
EOF
fi
if [[ "${use_cloud_config}" != "true" ]]; then
rm -f /etc/gce.conf
fi
if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authn.config
clusters:
- name: gcp-authentication-server
cluster:
server: ${GCP_AUTHN_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authentication-server
user: kube-apiserver
name: webhook
EOF
fi
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authz.config
clusters:
- name: gcp-authorization-server
cluster:
server: ${GCP_AUTHZ_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authorization-server
user: kube-apiserver
name: webhook
EOF
fi
if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
# This is the config file for the image review webhook.
cat <<EOF >/etc/gcp_image_review.config
clusters:
- name: gcp-image-review-server
cluster:
server: ${GCP_IMAGE_VERIFICATION_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-image-review-server
user: kube-apiserver
name: webhook
EOF
# This is the config for the image review admission controller.
cat <<EOF >/etc/admission_controller.config
imagePolicy:
kubeConfigFile: /etc/gcp_image_review.config
allowTTL: 30
denyTTL: 30
retryBackoff: 500
defaultAllow: true
EOF
fi
}
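# For illustration, with only PROJECT_ID and NODE_NETWORK set (placeholder
# values below, not real settings), the generated /etc/gce.conf would contain:
#   [global]
#   project-id = my-gcp-project
#   network-name = my-network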
# Arg 1: the address of the API server
function create-kubelet-kubeconfig() {
local apiserver_address="${1}"
if [[ -z "${apiserver_address}" ]]; then
echo "Must provide API server address to create Kubelet kubeconfig file!"
exit 1
fi
echo "Creating kubelet kubeconfig file"
if [[ -z "${KUBELET_CA_CERT:-}" ]]; then
KUBELET_CA_CERT="${CA_CERT}"
fi
cat <<EOF >/var/lib/kubelet/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: ${KUBELET_CERT}
client-key-data: ${KUBELET_KEY}
clusters:
- name: local
cluster:
server: ${apiserver_address}
certificate-authority-data: ${KUBELET_CA_CERT}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
}
# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
# to generate a kubeconfig file for the kubelet to securely connect to the apiserver.
# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node
# should register to the apiserver.
function create-master-kubelet-auth {
# Only configure the kubelet on the master if the required variables are
# set in the environment.
if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
REGISTER_MASTER_KUBELET="true"
create-kubelet-kubeconfig "https://${KUBELET_APISERVER}"
fi
}
function create-kubeproxy-user-kubeconfig {
echo "Creating kube-proxy user kubeconfig file"
cat <<EOF >/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
}
function create-kubecontrollermanager-kubeconfig {
echo "Creating kube-controller-manager kubeconfig file"
mkdir -p /etc/srv/kubernetes/kube-controller-manager
cat <<EOF >/etc/srv/kubernetes/kube-controller-manager/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
user:
token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: kube-controller-manager
name: service-account-context
current-context: service-account-context
EOF
}
function create-kubescheduler-kubeconfig {
echo "Creating kube-scheduler kubeconfig file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
user:
token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: kube-scheduler
name: kube-scheduler
current-context: kube-scheduler
EOF
}
function create-master-etcd-auth {
if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
local -r auth_dir="/etc/srv/kubernetes"
echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
fi
}
function configure-docker-daemon {
echo "Configuring the Docker daemon"
local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
docker_opts+=" --log-level=debug"
else
docker_opts+=" --log-level=warn"
fi
local use_net_plugin="true"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
# set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
docker_opts+=" --bip=169.254.123.1/24"
else
use_net_plugin="false"
docker_opts+=" --bridge=cbr0"
fi
# Decide whether to enable a docker registry mirror. This is taken from
# the "kube-env" metadata value.
if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
docker_opts+=" --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
fi
mkdir -p /etc/systemd/system/docker.service.d/
local kubernetes_conf_dropin="/etc/systemd/system/docker.service.d/00_kubelet.conf"
cat > "${kubernetes_conf_dropin}" <<EOF
[Service]
Environment="DOCKER_OPTS=${docker_opts} ${EXTRA_DOCKER_OPTS:-}"
EOF
# Always restart to get the cbr0 change
echo "Docker daemon options updated. Restarting docker..."
systemctl daemon-reload
systemctl restart docker
}
# A helper function for loading a docker image. It keeps trying up to 5 times.
#
# $1: Full path of the docker image
function try-load-docker-image {
local -r img=$1
echo "Try to load docker image file ${img}"
# Temporarily turn off errexit, because we don't want to exit on first failure.
set +e
local -r max_attempts=5
local -i attempt_num=1
if [[ "${CONTAINER_RUNTIME:-}" == "rkt" ]]; then
for attempt_num in $(seq 1 "${max_attempts}"); do
local aci_tmpdir="$(mktemp -t -d docker2aci.XXXXX)"
(cd "${aci_tmpdir}"; timeout 40 "${DOCKER2ACI_BIN}" "$1")
local aci_success=$?
timeout 40 "${RKT_BIN}" fetch --insecure-options=image "${aci_tmpdir}"/*.aci
local fetch_success=$?
rm -f "${aci_tmpdir}"/*.aci
rmdir "${aci_tmpdir}"
if [[ ${fetch_success} -eq 0 && ${aci_success} -eq 0 ]]; then
echo "rkt: Loaded ${img}"
break
fi
if [[ "${attempt}" == "${max_attempts}" ]]; then
echo "rkt: Failed to load image file ${img} after ${max_attempts} retries."
exit 1
fi
sleep 5
done
else
until timeout 30 docker load -i "${img}"; do
if [[ "${attempt_num}" == "${max_attempts}" ]]; then
echo "Fail to load docker image file ${img} after ${max_attempts} retries."
exit 1
else
attempt_num=$((attempt_num+1))
sleep 5
fi
done
fi
# Re-enable errexit.
set -e
}
# Loads kube-system docker images. It is better to do it before starting kubelet,
# as kubelet will restart docker daemon, which may interfere with loading images.
function load-docker-images {
echo "Start loading kube-system docker images"
local -r img_dir="${KUBE_HOME}/kube-docker-files"
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
try-load-docker-image "${img_dir}/kube-apiserver.tar"
try-load-docker-image "${img_dir}/kube-controller-manager.tar"
try-load-docker-image "${img_dir}/kube-scheduler.tar"
else
try-load-docker-image "${img_dir}/kube-proxy.tar"
fi
}
# This function assembles the kubelet systemd service file and starts it
# using systemctl.
function start-kubelet {
echo "Start kubelet"
local kubelet_bin="${KUBE_HOME}/bin/kubelet"
local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
echo "Using kubelet binary at ${kubelet_bin}"
local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
flags+=" --allow-privileged=true"
flags+=" --cgroup-root=/"
flags+=" --cloud-provider=gce"
flags+=" --cluster-dns=${DNS_SERVER_IP}"
flags+=" --cluster-domain=${DNS_DOMAIN}"
flags+=" --pod-manifest-path=/etc/kubernetes/manifests"
flags+=" --experimental-check-node-capabilities-before-mount=true"
if [[ -n "${KUBELET_PORT:-}" ]]; then
flags+=" --port=${KUBELET_PORT}"
fi
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
flags+=" --enable-debugging-handlers=false"
flags+=" --hairpin-mode=none"
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
flags+=" --register-schedulable=false"
else
# Standalone mode (not widely used?)
flags+=" --pod-cidr=${MASTER_IP_RANGE}"
fi
else # For nodes
flags+=" --enable-debugging-handlers=true"
flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
[[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
[[ "${HAIRPIN_MODE:-}" == "none" ]]; then
flags+=" --hairpin-mode=${HAIRPIN_MODE}"
fi
fi
# Network plugin
if [[ -n "${NETWORK_PROVIDER:-}" ]]; then
flags+=" --cni-bin-dir=/opt/kubernetes/bin"
flags+=" --network-plugin=${NETWORK_PROVIDER}"
fi
if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
fi
if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
flags+=" --manifest-url=${MANIFEST_URL}"
flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}"
fi
if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
fi
local node_labels=""
if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${KUBERNETES_MASTER:-}" != "true" ]]; then
# Add kube-proxy daemonset label to node to avoid situation during cluster
# upgrade/downgrade when there are two instances of kube-proxy running on a node.
node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true"
fi
if [[ -n "${NODE_LABELS:-}" ]]; then
node_labels="${node_labels:+${node_labels},}${NODE_LABELS}"
fi
if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${KUBERNETES_MASTER:-}" != "true" ]]; then
node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}"
fi
if [[ -n "${node_labels:-}" ]]; then
flags+=" --node-labels=${node_labels}"
fi
if [[ -n "${NODE_TAINTS:-}" ]]; then
flags+=" --register-with-taints=${NODE_TAINTS}"
fi
if [[ -n "${EVICTION_HARD:-}" ]]; then
flags+=" --eviction-hard=${EVICTION_HARD}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
flags+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then
flags+=" --container-runtime=${CONTAINER_RUNTIME}"
flags+=" --rkt-path=${KUBE_HOME}/bin/rkt"
flags+=" --rkt-stage1-image=${RKT_STAGE1_IMAGE}"
fi
local -r kubelet_env_file="/etc/kubelet-env"
echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS
[Install]
WantedBy=multi-user.target
EOF
# Flush iptables nat table
iptables -t nat -F || true
systemctl start kubelet.service
}
# Create the log file and set its properties.
#
# $1 is the file to create.
function prepare-log-file {
touch "$1"
chmod 644 "$1"
chown root:root "$1"
}
# Prepares parameters for kube-proxy manifest.
# $1 source path of kube-proxy manifest.
function prepare-kube-proxy-manifest-variables {
local -r src_file="$1"
remove-salt-config-comments "${src_file}"
local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
local kube_docker_registry="gcr.io/google_containers"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
kube_docker_registry=${KUBE_DOCKER_REGISTRY}
fi
local -r kube_proxy_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-proxy.docker_tag)
local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
params+=" ${KUBEPROXY_TEST_ARGS}"
fi
local container_env=""
local kube_cache_mutation_detector_env_name=""
local kube_cache_mutation_detector_env_value=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="env:"
kube_cache_mutation_detector_env_name="- name: KUBE_CACHE_MUTATION_DETECTOR"
kube_cache_mutation_detector_env_value="value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
local pod_priority=""
if [[ "${ENABLE_POD_PRIORITY:-}" == "true" ]]; then
pod_priority="priorityClassName: system-node-critical"
fi
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
sed -i -e "s@{{params}}@${params}@g" ${src_file}
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
sed -i -e "s@{{pod_priority}}@${pod_priority}@g" ${src_file}
sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file}
fi
if [[ "${CONTAINER_RUNTIME:-}" == "rkt" ]]; then
# Work arounds for https://github.com/coreos/rkt/issues/3245 and https://github.com/coreos/rkt/issues/3264
# This is an incredibly hacky workaround. It's fragile too. If the kube-proxy command changes too much, this breaks
# TODO, this could be done much better in many other places, such as an
# init script within the container, or even within kube-proxy's code.
local extra_workaround_cmd="ln -sf /proc/self/mounts /etc/mtab; \
mount -o remount,rw /proc; \
mount -o remount,rw /proc/sys; \
mount -o remount,rw /sys; "
sed -i -e "s@-\\s\\+kube-proxy@- ${extra_workaround_cmd} kube-proxy@g" "${src_file}"
fi
}
# Starts kube-proxy static pod.
function start-kube-proxy {
echo "Start kube-proxy static pod"
prepare-log-file /var/log/kube-proxy.log
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest"
prepare-kube-proxy-manifest-variables "$src_file"
cp "${src_file}" /etc/kubernetes/manifests
}
# Replaces the variables in the etcd manifest file with the real values, and then
# copy the file to the manifest dir
# $1: value for variable 'suffix'
# $2: value for variable 'port'
# $3: value for variable 'server_port'
# $4: value for variable 'cpulimit'
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
local host_name=${ETCD_HOSTNAME:-$(hostname -s)}
local etcd_cluster=""
local cluster_state="new"
local etcd_protocol="http"
local etcd_creds=""
if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth "
etcd_protocol="https"
fi
for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do
etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3"
if [[ -n "${etcd_cluster}" ]]; then
etcd_cluster+=","
cluster_state="existing"
fi
etcd_cluster+="${etcd_host}"
done
local -r temp_file="/tmp/$5"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
remove-salt-config-comments "${temp_file}"
sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
# Get default storage backend from manifest file.
local -r default_storage_backend=$(cat "${temp_file}" | \
grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=4294967296@g" "${temp_file}"
else
sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
fi
sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}"
if [[ -n "${ETCD_IMAGE:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
if [[ -n "${ETCD_DOCKER_REPOSITORY:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@${ETCD_DOCKER_REPOSITORY}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_docker_repository', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
if [[ -n "${ETCD_VERSION:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
# Replace the volume host path.
sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}"
mv "${temp_file}" /etc/kubernetes/manifests
}
function start-etcd-empty-dir-cleanup-pod {
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml" "/etc/kubernetes/manifests"
}
# Starts etcd server pod (and etcd-events pod if needed).
# More specifically, it prepares dirs and files, sets the variable value
# in the manifests, and copies them to /etc/kubernetes/manifests.
function start-etcd-servers {
echo "Start etcd pods"
if [[ -d /etc/etcd ]]; then
rm -rf /etc/etcd
fi
if [[ -e /etc/default/etcd ]]; then
rm -f /etc/default/etcd
fi
if [[ -e /etc/systemd/system/etcd.service ]]; then
rm -f /etc/systemd/system/etcd.service
fi
if [[ -e /etc/init.d/etcd ]]; then
rm -f /etc/init.d/etcd
fi
prepare-log-file /var/log/etcd.log
prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"
prepare-log-file /var/log/etcd-events.log
prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
}
# Calculates the following variables based on env variables, which will be used
# by the manifests of several kube-master components.
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function compute-master-manifest-variables {
CLOUD_CONFIG_OPT=""
CLOUD_CONFIG_VOLUME=""
CLOUD_CONFIG_MOUNT=""
if [[ -f /etc/gce.conf ]]; then
CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf"
CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\", \"type\": \"FileOrCreate\"}},"
CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
fi
DOCKER_REGISTRY="gcr.io/google_containers"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}"
fi
}
# A helper function for removing salt configuration and comments from a file.
# This is mainly for preparing a manifest file.
#
# $1: Full path of the file to manipulate
function remove-salt-config-comments {
# Remove salt configuration.
sed -i "/^[ |\t]*{[#|%]/d" $1
# Remove comments.
sed -i "/^[ |\t]*#/d" $1
}
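# Sketch of the effect (example manifest lines, not from a real file):
#   {% set params = pillar.get('params') %}    <- removed (salt statement)
#   # this is a comment                        <- removed
#   image: {{pillar['kube_docker_registry']}}  <- kept for later sed substitution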
# Starts kubernetes apiserver.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function start-kube-apiserver {
echo "Start kubernetes api-server"
prepare-log-file /var/log/kube-apiserver.log
prepare-log-file /var/log/kube-apiserver-audit.log
# Calculate variables and assemble the command line.
local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --address=127.0.0.1"
params+=" --allow-privileged=true"
params+=" --cloud-provider=gce"
params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt"
params+=" --etcd-servers=http://127.0.0.1:2379"
params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002"
params+=" --secure-port=443"
params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert"
params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key"
params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
params+=" --enable-aggregator-routing=true"
if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
fi
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
params+=" --storage-backend=${STORAGE_BACKEND}"
fi
if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT_SEC:-}" ]]; then
params+=" --request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT_SEC}s"
fi
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
fi
if [[ -n "${NUM_NODES:-}" ]]; then
# If the cluster is large, increase max-requests-inflight limit in apiserver.
if [[ "${NUM_NODES}" -ge 1000 ]]; then
params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500"
fi
# Set amount of memory available for apiserver based on number of nodes.
# TODO: Once we start setting proper requests and limits for apiserver
# we should reuse the same logic here instead of current heuristic.
params+=" --target-ram-mb=$((${NUM_NODES} * 60))"
fi
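# For example (assumed node count, illustration only): with NUM_NODES=2000 the
# block above adds --max-requests-inflight=1500 --max-mutating-requests-inflight=500
# and --target-ram-mb=120000 (2000 * 60).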
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
fi
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
# We currently only support enabling with a fixed path and with built-in log
# rotation "disabled" (large value) so it behaves like kube-apiserver.log.
# External log rotation should be set up the same as for kube-apiserver.log.
params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
params+=" --audit-log-maxage=0"
params+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
params+=" --audit-log-maxsize=2000000000"
fi
if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]]; then
params+=" --enable-logs-handler=false"
fi
local admission_controller_config_mount=""
local admission_controller_config_volume=""
local image_policy_webhook_config_mount=""
local image_policy_webhook_config_volume=""
if [[ -n "${ADMISSION_CONTROL:-}" ]]; then
params+=" --admission-control=${ADMISSION_CONTROL}"
if [[ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]]; then
params+=" --admission-control-config-file=/etc/admission_controller.config"
# Mount the file to configure admission controllers if ImagePolicyWebhook is set.
admission_controller_config_mount="{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"/etc/admission_controller.config\", \"readOnly\": false},"
admission_controller_config_volume="{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"/etc/admission_controller.config\", \"type\": \"FileOrCreate\"}},"
# Mount the file to configure the ImagePolicyWebhook's webhook.
image_policy_webhook_config_mount="{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false},"
image_policy_webhook_config_volume="{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\", \"type\": \"FileOrCreate\"}},"
fi
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then
params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}"
fi
if [[ -n "${RUNTIME_CONFIG:-}" ]]; then
params+=" --runtime-config=${RUNTIME_CONFIG}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
if [[ -n "${PROXY_SSH_USER:-}" ]]; then
params+=" --advertise-address=${vm_external_ip}"
params+=" --ssh-user=${PROXY_SSH_USER}"
params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
else
params+=" --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
fi
elif [ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]; then
params="${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}"
fi
local webhook_authn_config_mount=""
local webhook_authn_config_volume=""
if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config"
webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false},"
webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\", \"type\": \"FileOrCreate\"}},"
fi
local authorization_mode="RBAC"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
# Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false
if [[ "${ENABLE_LEGACY_ABAC:-}" != "false" ]]; then
echo "Warning: Enabling legacy ABAC policy. All service accounts will have superuser API access. Set ENABLE_LEGACY_ABAC=false to disable this."
# Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions)
if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then
local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl"
remove-salt-config-comments "${abac_policy_json}"
if [[ -n "${KUBE_USER:-}" ]]; then
sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}"
else
sed -i -e "/{{kube_user}}/d" "${abac_policy_json}"
fi
cp "${abac_policy_json}" /etc/srv/kubernetes/
fi
params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl"
authorization_mode+=",ABAC"
fi
local webhook_config_mount=""
local webhook_config_volume=""
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
authorization_mode+=",Webhook"
params+=" --authorization-webhook-config-file=/etc/gcp_authz.config"
webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false},"
webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\", \"type\": \"FileOrCreate\"}},"
fi
params+=" --authorization-mode=${authorization_mode}"
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then
if [[ -n "${container_env}" ]]; then
container_env="${container_env}, "
fi
container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\""
fi
if [[ -n "${container_env}" ]]; then
container_env="\"env\":[{${container_env}}],"
fi
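# Illustration (assumed env values): with ENABLE_CACHE_MUTATION_DETECTOR=true and
# ENABLE_PATCH_CONVERSION_DETECTOR unset, container_env expands to:
#   "env":[{"name": "KUBE_CACHE_MUTATION_DETECTOR", "value": "true"}],
# which is later substituted for {{container_env}} in the apiserver manifest.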
src_file="${src_dir}/kube-apiserver.manifest"
remove-salt-config-comments "${src_file}"
# Evaluate variables.
local -r kube_apiserver_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-apiserver.docker_tag)
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}"
sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
sed -i -e "s@{{secure_port}}@443@g" "${src_file}"
sed -i -e "s@{{secure_port}}@8080@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}"
sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" "${src_file}"
sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" "${src_file}"
sed -i -e "s@{{admission_controller_config_mount}}@${admission_controller_config_mount}@g" "${src_file}"
sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function start-kube-controller-manager {
echo "Start kubernetes controller-manager"
create-kubecontrollermanager-kubeconfig
prepare-log-file /var/log/kube-controller-manager.log
# Calculate variables and assemble the command line.
local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --use-service-account-credentials"
params+=" --cloud-provider=gce"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt"
params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key"
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
fi
if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
params+=" --cluster-name=${INSTANCE_PREFIX}"
fi
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
fi
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [[ -n "${CONCURRENT_SERVICE_SYNCS:-}" ]]; then
params+=" --concurrent-service-syncs=${CONCURRENT_SERVICE_SYNCS}"
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
params+=" --allocate-node-cidrs=true"
elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
fi
if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
params+=" --cidr-allocator-type=CloudAllocator"
params+=" --configure-cloud-routes=false"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
local -r kube_rc_docker_tag=$(cat /opt/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
fi
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
remove-salt-config-comments "${src_file}"
# Evaluate variables.
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes scheduler.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in compute-master-manifest-variables)
# DOCKER_REGISTRY
function start-kube-scheduler {
echo "Start kubernetes scheduler"
create-kubescheduler-kubeconfig
prepare-log-file /var/log/kube-scheduler.log
# Calculate variables and set them in the manifest.
params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
fi
local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
remove-salt-config-comments "${src_file}"
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts cluster autoscaler.
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
function start-cluster-autoscaler {
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Start kubernetes cluster autoscaler"
prepare-log-file /var/log/cluster-autoscaler.log
# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
remove-salt-config-comments "${src_file}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{%.*%}@@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
fi
}
# A helper function for copying addon manifests and set dir/files
# permissions.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
function setup-addon-manifests {
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/$2"
local -r dst_dir="/etc/kubernetes/$1/$2"
if [[ ! -d "${dst_dir}" ]]; then
mkdir -p "${dst_dir}"
fi
local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml "${dst_dir}"
fi
files=$(find "${src_dir}" -maxdepth 1 -name "*.json")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.json "${dst_dir}"
fi
files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml.in")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml.in "${dst_dir}"
fi
chown -R root:root "${dst_dir}"
chmod 755 "${dst_dir}"
chmod 644 "${dst_dir}"/*
}
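# Example call (mirrors how the function is used further below):
#   setup-addon-manifests "addons" "dns"
# copies ${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/dns/*.yaml, *.json and
# *.yaml.in into /etc/kubernetes/addons/dns and fixes ownership and permissions.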
# Updates parameters in yaml file for prometheus-to-sd configuration, or
# removes component if it is disabled.
function update-prometheus-to-sd-parameters {
if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" == "true" ]]; then
sed -i -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" "$1"
sed -i -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" "$1"
else
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
fi
}
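# Illustration: when ENABLE_PROMETHEUS_TO_SD is anything other than "true", every
# line between "# BEGIN_PROMETHEUS_TO_SD" and "# END_PROMETHEUS_TO_SD" (inclusive)
# is deleted from the given manifest, removing the prometheus-to-sd sidecar.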
# Sets up the manifests of coreDNS for k8s addons.
function setup-coredns-manifest {
local -r coredns_file="${dst_dir}/dns/coredns.yaml"
mv "${dst_dir}/dns/coredns.yaml.in" "${coredns_file}"
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}"
sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"
}
# Sets up the manifests of kube-dns for k8s addons.
function setup-kube-dns-manifest {
local -r kubedns_file="${dst_dir}/dns/kube-dns.yaml"
mv "${dst_dir}/dns/kube-dns.yaml.in" "${kubedns_file}"
if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
# Replace with custom GKE kube-dns deployment.
cat > "${kubedns_file}" <<EOF
$(echo "$CUSTOM_KUBE_DNS_YAML")
EOF
update-prometheus-to-sd-parameters ${kubedns_file}
fi
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${kubedns_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${kubedns_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler"
fi
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
# CLUSTER_NAME
function start-kube-addons {
echo "Prepare kube-addons manifests and start kube addon manager"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
local -r dst_dir="/etc/kubernetes/addons"
# prep addition kube-up specific rbac objects
setup-addon-manifests "addons" "rbac"
# Set up manifests of other addons.
if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]]; then
if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then
# Replace with custom GKE kube proxy.
cat > "$src_dir/kube-proxy/kube-proxy-ds.yaml" <<EOF
$(echo "$CUSTOM_KUBE_PROXY_YAML")
EOF
update-prometheus-to-sd-parameters "$src_dir/kube-proxy/kube-proxy-ds.yaml"
fi
prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
setup-addon-manifests "addons" "kube-proxy"
fi
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "google" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "standalone" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
local -r file_dir="cluster-monitoring/${ENABLE_CLUSTER_MONITORING}"
setup-addon-manifests "addons" "cluster-monitoring"
setup-addon-manifests "addons" "${file_dir}"
# Replace the salt configurations with variable values.
base_metrics_memory="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
base_eventer_memory="190Mi"
base_metrics_cpu="${HEAPSTER_GCP_BASE_CPU:-80m}"
nanny_memory="90Mi"
local -r metrics_memory_per_node="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
local -r metrics_cpu_per_node="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
local -r eventer_memory_per_node="500"
local -r nanny_memory_per_node="200"
if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
num_kube_nodes="$((${NUM_NODES}+1))"
nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
fi
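# Worked example (assumed cluster size): with NUM_NODES=100, num_kube_nodes is 101
# and nanny_memory becomes $((101 * 200 + 90 * 1024))Ki = 112360Ki.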
controller_yaml="${dst_dir}/${file_dir}"
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
controller_yaml="${controller_yaml}/heapster-controller-combined.yaml"
else
controller_yaml="${controller_yaml}/heapster-controller.yaml"
fi
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
update-prometheus-to-sd-parameters ${controller_yaml}
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]]; then
use_old_resources="${HEAPSTER_USE_OLD_STACKDRIVER_RESOURCES:-true}"
use_new_resources="${HEAPSTER_USE_NEW_STACKDRIVER_RESOURCES:-false}"
sed -i -e "s@{{ use_old_resources }}@${use_old_resources}@g" "${controller_yaml}"
sed -i -e "s@{{ use_new_resources }}@${use_new_resources}@g" "${controller_yaml}"
fi
fi
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] ||
([[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]] &&
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]); then
if [[ "${ENABLE_METADATA_AGENT:-}" == "stackdriver" ]] &&
[[ "${METADATA_AGENT_VERSION:-}" != "" ]]; then
metadata_agent_cpu_request="${METADATA_AGENT_CPU_REQUEST:-40m}"
metadata_agent_memory_request="${METADATA_AGENT_MEMORY_REQUEST:-50Mi}"
setup-addon-manifests "addons" "metadata-agent/stackdriver"
deployment_yaml="${dst_dir}/metadata-agent/stackdriver/metadata-agent.yaml"
sed -i -e "s@{{ metadata_agent_version }}@${METADATA_AGENT_VERSION}@g" "${deployment_yaml}"
sed -i -e "s@{{ metadata_agent_cpu_request }}@${metadata_agent_cpu_request}@g" "${deployment_yaml}"
sed -i -e "s@{{ metadata_agent_memory_request }}@${metadata_agent_memory_request}@g" "${deployment_yaml}"
fi
fi
if [[ "${ENABLE_METRICS_SERVER:-}" == "true" ]]; then
setup-addon-manifests "addons" "metrics-server"
fi
if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns"
if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
setup-coredns-manifest
else
setup-kube-dns-manifest
fi
fi
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
setup-addon-manifests "addons" "registry"
local -r registry_pv_file="${dst_dir}/registry/registry-pv.yaml"
local -r registry_pvc_file="${dst_dir}/registry/registry-pvc.yaml"
mv "${dst_dir}/registry/registry-pv.yaml.in" "${registry_pv_file}"
mv "${dst_dir}/registry/registry-pvc.yaml.in" "${registry_pvc_file}"
# Replace the salt configurations with variable values.
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pv_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pvc_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" "${registry_pvc_file}"
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
[[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
setup-addon-manifests "addons" "fluentd-elasticsearch"
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
update-prometheus-to-sd-parameters ${event_exporter_yaml}
update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
setup-addon-manifests "addons" "node-problem-detector"
fi
if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
setup-addon-manifests "admission-controls" "limit-range"
fi
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
# Configure Calico CNI directory.
local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
sed -i -e "s@__CALICO_CNI_DIR__@/opt/cni/bin@g" "${ds_file}"
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"
fi
if [[ "${ENABLE_METADATA_CONCEALMENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "metadata-proxy/gce"
local -r metadata_proxy_yaml="${dst_dir}/metadata-proxy/gce/metadata-proxy.yaml"
update-prometheus-to-sd-parameters ${metadata_proxy_yaml}
fi
# Place addon manager pod manifest.
cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
}
# Starts an image-puller - used in test clusters.
function start-image-puller {
echo "Start image-puller"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest" \
/etc/kubernetes/manifests/
}
# Starts kube-registry proxy
function start-kube-registry-proxy {
echo "Start kube-registry-proxy"
cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
}
# Starts a l7 loadbalancing controller for ingress.
function start-lb-controller {
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
echo "Start GCE L7 pod"
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
local -r glbc_manifest="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest"
if [[ ! -z "${GCE_GLBC_IMAGE:-}" ]]; then
sed -i "s@image:.*@image: ${GCE_GLBC_IMAGE}@" "${glbc_manifest}"
fi
cp "${glbc_manifest}" /etc/kubernetes/manifests/
fi
}
# Starts rescheduler.
function start-rescheduler {
if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
echo "Start Rescheduler"
prepare-log-file /var/log/rescheduler.log
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
/etc/kubernetes/manifests/
fi
}
# Install and setup rkt
# TODO(euank): There should be a toggle to use the distro-provided rkt binary
# Sets the following variables:
# RKT_BIN: the path to the rkt binary
function setup-rkt {
local rkt_bin="${KUBE_HOME}/bin/rkt"
if [[ -x "${rkt_bin}" ]]; then
# idempotency, skip downloading this time
# TODO(euank): this might get in the way of updates, but 'file busy'
# because of rkt-api would too
RKT_BIN="${rkt_bin}"
return
fi
mkdir -p /etc/rkt "${KUBE_HOME}/download/"
local rkt_tar="${KUBE_HOME}/download/rkt.tar.gz"
local rkt_tmpdir=$(mktemp -d "${KUBE_HOME}/rkt_download.XXXXX")
curl --retry 5 --retry-delay 3 --fail --silent --show-error \
--location --create-dirs --output "${rkt_tar}" \
https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz
tar --strip-components=1 -xf "${rkt_tar}" -C "${rkt_tmpdir}" --overwrite
mv "${rkt_tmpdir}/rkt" "${rkt_bin}"
if [[ ! -x "${rkt_bin}" ]]; then
echo "Could not download requested rkt binary"
exit 1
fi
RKT_BIN="${rkt_bin}"
# Cache rkt stage1 images for speed
"${RKT_BIN}" fetch --insecure-options=image "${rkt_tmpdir}"/*.aci
rm -rf "${rkt_tmpdir}"
cat > /etc/systemd/system/rkt-api.service <<EOF
[Unit]
Description=rkt api service
Documentation=http://github.com/coreos/rkt
After=network.target
[Service]
ExecStart=${RKT_BIN} api-service --listen=127.0.0.1:15441
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl enable rkt-api.service
systemctl start rkt-api.service
}
# Install docker2aci, needed to load server images if using rkt runtime
# This should be removed once rkt can fetch on-disk docker tarballs directly
# Sets the following variables:
# DOCKER2ACI_BIN: the path to the docker2aci binary
function install-docker2aci {
local tar_path="${KUBE_HOME}/download/docker2aci.tar.gz"
local tmp_path="${KUBE_HOME}/docker2aci"
mkdir -p "${KUBE_HOME}/download/" "${tmp_path}"
curl --retry 5 --retry-delay 3 --fail --silent --show-error \
--location --create-dirs --output "${tar_path}" \
https://github.com/appc/docker2aci/releases/download/v0.14.0/docker2aci-v0.14.0.tar.gz
tar --strip-components=1 -xf "${tar_path}" -C "${tmp_path}" --overwrite
DOCKER2ACI_BIN="${KUBE_HOME}/bin/docker2aci"
mv "${tmp_path}/docker2aci" "${DOCKER2ACI_BIN}"
}
########### Main Function ###########
echo "Start to configure instance for kubernetes"
# Note: this name doesn't make as much sense here as in gci where it's actually
# /home/kubernetes, but for ease of diff-ing, retain the same variable name
KUBE_HOME="/opt/kubernetes"
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
exit 1
fi
source "${KUBE_HOME}/kube-env"
if [[ -n "${KUBE_USER:-}" ]]; then
if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_USER format."
exit 1
fi
fi
# generate the controller manager and scheduler tokens here since they are only used on the master.
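# Each token is the first 32 base64 characters (with '=', '+' and '/' stripped) of 128 random bytes from /dev/urandom.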
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
# KUBERNETES_CONTAINER_RUNTIME is set by the `kube-env` file, but it's a bit of a mouthful
if [[ "${CONTAINER_RUNTIME:-}" == "" ]]; then
CONTAINER_RUNTIME="${KUBERNETES_CONTAINER_RUNTIME:-docker}"
fi
create-dirs
ensure-local-ssds
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
mount-master-pd
create-master-auth
create-master-kubelet-auth
create-master-etcd-auth
else
create-kubelet-kubeconfig "https://${KUBERNETES_MASTER_NAME}"
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
create-kubeproxy-user-kubeconfig
fi
fi
if [[ "${KUBERNETES_CONTAINER_RUNTIME:-}" == "rkt" ]]; then
systemctl stop docker
systemctl disable docker
setup-rkt
install-docker2aci
create-kube-controller-manager-dirs
else
configure-docker-daemon
fi
load-docker-images
start-kubelet
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
compute-master-manifest-variables
start-etcd-servers
start-etcd-empty-dir-cleanup-pod
start-kube-apiserver
start-kube-controller-manager
start-kube-scheduler
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
else
if [[ "${KUBE_PROXY_DAEMONSET:-}" != "true" ]]; then
start-kube-proxy
fi
# Kube-registry-proxy.
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
start-kube-registry-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
fi
echo "Done for the configuration for kubernetes"
|
dnardo/kubernetes
|
cluster/gce/container-linux/configure-helper.sh
|
Shell
|
apache-2.0
| 63,148 |
#!/bin/bash
# Usage: ./obdalib-protege41-test.sh
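# Requires ONTOP_BUILD_PATH and ONTOP_REPORT_PATH to be set in the environment.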
cd $ONTOP_BUILD_PATH/obdalib-protege41
mvn clean
mvn site
rm -rf $ONTOP_REPORT_PATH/obdalib-protege41
cp -R target/site $ONTOP_REPORT_PATH/obdalib-protege41
|
ghxiao/ontop-spatial
|
scripts/obdalib-protege41-test.sh
|
Shell
|
apache-2.0
| 204 |
#!/bin/bash
# SCRIPT TO STOP, SET CONFIG, AND START HADOOP and run HiBench in AZURE
usage() {
echo "Usage: $0 [-d disk <SSD|HDD|RL{1,2,3}|R{1,2,3}>] [-b benchmark <_min|_10>] [-r replicaton <positive int>]\
[-m max mappers and reducers <positive int>] [-i io factor <positive int>] [-p port prefix <3|4|5>]\
[-I io.file <positive int>] [-l list of benchmarks <space separated string>] [-c compression <0 (dissabled)|1|2|3>]\
[-z <block size in bytes>] [-s (save prepare)] -N (don't delete files)" 1>&2;
echo "example: $0 -n IB -d SSD -r 1 -m 12 -i 10 -p 3 -b _min -I 4096 -l wordcount -c 1"
exit 1;
}
#make sure all spawned background jobs are killed when done (e.g. ssh port forwarding)
#trap "kill 0" SIGINT SIGTERM EXIT
trap 'stop_hadoop; stop_monit; kill $(jobs -p); exit;' SIGINT SIGTERM EXIT
OPTIND=1 #A POSIX variable, reset in case getopts has been used previously in the shell.
# Default values
VERBOSE=0
NET="ETH"
DISK="HDD"
BENCH=""
REPLICATION=1
MAX_MAPS=8
IO_FACTOR=10
PORT_PREFIX=3
IO_FILE=65536
LIST_BENCHS="wordcount sort terasort kmeans pagerank bayes dfsioe" #nutchindexing hivebench
COMPRESS_GLOBAL=0
COMPRESS_TYPE=0
#COMPRESS_GLOBAL=1
#COMPRESS_TYPE=1
#COMPRESS_CODEC_GLOBAL=org.apache.hadoop.io.compress.DefaultCodec
#COMPRESS_CODEC_GLOBAL=com.hadoop.compression.lzo.LzoCodec
#COMPRESS_CODEC_GLOBAL=org.apache.hadoop.io.compress.SnappyCodec
SAVE_BENCH=""
BLOCK_SIZE=67108864
DELETE_HDFS=1
while getopts ":h:?:v:b:r:n:d:m:i:p:l:I:c:z:sN" opt; do
case "$opt" in
h|\?)
usage
;;
v)
VERBOSE=1
;;
n)
NET=$OPTARG
[ "$NET" == "IB" ] || [ "$NET" == "ETH" ] || usage
;;
d)
DISK=$OPTARG
[ "$DISK" == "SSD" ] || [ "$DISK" == "HDD" ] || [ "$DISK" == "RR1" ] || [ "$DISK" == "RR2" ] || [ "$DISK" == "RR3" ] || [ "$DISK" == "RL1" ] || [ "$DISK" == "RL2" ] || [ "$DISK" == "RL3" ] || usage
;;
b)
BENCH=$OPTARG
[ "$BENCH" == "_10" ] || [ "$BENCH" == "_min" ] || usage
;;
r)
REPLICATION=$OPTARG
((REPLICATION > 0)) || usage
;;
m)
MAX_MAPS=$OPTARG
((MAX_MAPS > 0 && MAX_MAPS < 33)) || usage
;;
i)
IO_FACTOR=$OPTARG
((IO_FACTOR > 0)) || usage
;;
I)
IO_FILE=$OPTARG
((IO_FILE > 0)) || usage
;;
p)
PORT_PREFIX=$OPTARG
((PORT_PREFIX > 0 && PORT_PREFIX < 6)) || usage
;;
c)
if [ "$OPTARG" == "0" ] ; then
COMPRESS_GLOBAL=0
COMPRESS_TYPE=0
elif [ "$OPTARG" == "1" ] ; then
COMPRESS_TYPE=1
COMPRESS_CODEC_GLOBAL=org.apache.hadoop.io.compress.DefaultCodec
elif [ "$OPTARG" == "2" ] ; then
COMPRESS_TYPE=2
COMPRESS_CODEC_GLOBAL=com.hadoop.compression.lzo.LzoCodec
elif [ "$OPTARG" == "3" ] ; then
COMPRESS_TYPE=3
COMPRESS_CODEC_GLOBAL=org.apache.hadoop.io.compress.SnappyCodec
fi
;;
l)
LIST_BENCHS=$OPTARG
;;
z)
BLOCK_SIZE=$OPTARG
;;
s)
SAVE_BENCH=1
;;
N)
DELETE_HDFS=0
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
CLUSTER_NAME="al-03"
NUMBER_OF_HOSTS="9"
NUMBER_OF_DATA_NODES="8"
userAloja="pristine"
#if [ "${NET}" == "IB" ] ; then
# host1="al-1001-ib0"
# host2="al-1002-ib0"
# host3="al-1003-ib0"
# host4="al-1004-ib0"
# IFACE="ib0"
#else
host1="${CLUSTER_NAME}-00"
host2="${CLUSTER_NAME}-01"
host3="${CLUSTER_NAME}-02"
host4="${CLUSTER_NAME}-03"
host5="${CLUSTER_NAME}-04"
host6="${CLUSTER_NAME}-05"
host7="${CLUSTER_NAME}-06"
host8="${CLUSTER_NAME}-07"
host9="${CLUSTER_NAME}-08"
IFACE="eth0"
#fi
DSH="dsh -M -m $host1,$host2,$host3,$host4,$host5,$host6,$host7,$host8,$host9"
DSH_MASTER="ssh $host1"
DSH_SLAVE="ssh $host1" #TODO check if OK
if [ "$DISK" == "SSD" ] ; then
HDD="/scratch/local/hadoop-hibench_$PORT_PREFIX"
elif [ "$DISK" == "HDD" ] || [ "$DISK" == "RL1" ] || [ "$DISK" == "RL2" ] || [ "$DISK" == "RL3" ] ; then
HDD="/scratch/local/hadoop-hibench_$PORT_PREFIX"
elif [ "$DISK" == "RR1" ] || [ "$DISK" == "RR2" ] || [ "$DISK" == "RR3" ]; then
HDD="/scratch/attached/1/hadoop-hibench_$PORT_PREFIX"
else
echo "Incorrect disk specified: $DISK"
exit 1
fi
BASE_DIR="/home/$userAloja/share/"
SOURCE_DIR="/scratch/local/aplic"
HADOOP_VERSION="hadoop-1.0.3"
H_DIR="$HDD/aplic/$HADOOP_VERSION" #execution dir
HIB_DIR="$SOURCE_DIR/HiBench${BENCH}/"
#Location of prepared inputs
SAVE_LOCATION="/scratch/local/HiBench_prepare/"
DATE='date +%Y%m%d_%H%M%S'
CONF="conf_${NET}_${DISK}_b${BENCH}_m${MAX_MAPS}_i${IO_FACTOR}_r${REPLICATION}_I${IO_FILE}_c${COMPRESS_TYPE}_z$((BLOCK_SIZE / 1048576 ))_${CLUSTER_NAME}"
JOB_NAME="`$DATE`_$CONF"
JOB_PATH="/home/$userAloja/share/jobs_$CLUSTER_NAME/$JOB_NAME"
LOG_PATH="$JOB_PATH/log_${JOB_NAME}.log"
LOG="2>&1 |tee -a $LOG_PATH"
#export HADOOP_HOME="$HADOOP_DIR"
export JAVA_HOME="$SOURCE_DIR/jdk1.7.0_25"
bwm_source="$SOURCE_DIR/bin/bwm-ng"
echo "$(date '+%s') : STARTING EXECUTION of $JOB_NAME"
#create dir to save files in one host
$DSH "mkdir -p $JOB_PATH"
$DSH "touch $LOG_PATH"
logger(){
stamp=$(date '+%s')
echo "${stamp} : $1" 2>&1 |tee -a $LOG_PATH
#log to zabbix
#zabbix_sender "hadoop.status $stamp $1"
}
#temporary to avoid read-only file system errors
logger "Re-mounting attached disks"
$DSH "sudo umount /scratch/attached/1 /scratch/attached/2 /scratch/attached/3; sudo mount -a"
logger "Setting scratch permissions"
$DSH "sudo chown -R $userAloja: /scratch"
#only copy files if version has changed (to save time in azure)
logger "Checking if to generate source dirs"
for host_number in $(seq 1 "$NUMBER_OF_HOSTS") ; do
host_tmp="host${host_number}" #for variable variable name
logger " for host ${!host_tmp}"
if [ "$(ssh "${!host_tmp}" "[ "\$\(cat $BASE_DIR/aplic/aplic_version\)" == "\$\(cat $SOURCE_DIR/aplic_version 2\> /dev/null \)" ] && echo 'OK' || echo 'KO'" )" != "OK" ] ; then
logger "At least host ${!host_tmp} did not have source dirs. Generating source dirs for ALL hosts"
$DSH "mkdir -p $SOURCE_DIR; cp -ru $BASE_DIR/aplic/* $SOURCE_DIR/"
break #don't need to check after one is missing
else
logger " Host ${!host_tmp} up to date"
fi
done
#if [ "$(cat $BASE_DIR/aplic/aplic_version)" != "$(cat $SOURCE_DIR/aplic_version)" ] ; then
# logger "Generating source dirs"
# $DSH "mkdir -p $SOURCE_DIR; cp -ru $BASE_DIR/aplic/* $SOURCE_DIR/"
# #$DSH "cp -ru $SOURCE_DIR/${HADOOP_VERSION}-home $SOURCE_DIR/${HADOOP_VERSION}" #rm -rf $SOURCE_DIR/${HADOOP_VERSION}-scratch# logger "Source dirs up to date"
#fi
zabbix_sender(){
:
#echo "al-1001 $1" | /home/pristine/share/aplic/zabbix/bin/zabbix_sender -c /home/pristine/share/aplic/zabbix/conf/zabbix_agentd_az.conf -T -i - 2>&1 > /dev/null
#>> $LOG_PATH
}
logger "Job name: $JOB_NAME"
logger "Job path: $JOB_PATH"
logger "Log path: $LOG_PATH"
logger "Disk location: $HDD"
logger "Conf: $CONF"
logger "HiBench: $HIB_DIR"
logger "Benchs to execute: $LIST_BENCHS"
logger "DSH: $DSH"
logger ""
##For zabbix monitoring make sure IB ports are available
#ssh_tunnel="ssh -N -L al-1001:30070:al-1001-ib0:30070 -L al-1001:30030:al-1001-ib0:30030 al-1001"
##first make sure we kill any previous, even if we don't need it
#pkill -f "ssh -N -L"
##"$ssh_tunnel"
#
#if [ "${NET}" == "IB" ] ; then
# $ssh_tunnel 2>&1 |tee -a $LOG_PATH &
#fi
#stop running instances with the previous conf
#$DSH_MASTER $H_DIR/bin/stop-all.sh 2>&1 >> $LOG_PATH
#prepare selected conf
#$DSH "rm -rf $DIR/conf/*" 2>&1 |tee -a $LOG_PATH
#$DSH "cp -r $DIR/$CONF/* $DIR/conf/" 2>&1 |tee -a $LOG_PATH
prepare_config(){
logger "Preparing exe dir"
if [ "$DELETE_HDFS" == "1" ] ; then
logger "Deleting previous PORT files"
$DSH "rm -rf $HDD/*" 2>&1 |tee -a $LOG_PATH
$DSH "rm -rf /scratch/attached/{1,2,3}/hadoop-hibench_$PORT_PREFIX/*" 2>&1 |tee -a $LOG_PATH
else
$DSH "rm -rf $HDD/{aplic,logs}" 2>&1 |tee -a $LOG_PATH
fi
logger "Creating source dir and Copying Hadoop"
$DSH "mkdir -p /scratch/attached/{1,2,3}/hadoop-hibench_$PORT_PREFIX/{aplic,hadoop,logs}" 2>&1 |tee -a $LOG_PATH
$DSH "mkdir -p $HDD/{aplic,hadoop,logs}" 2>&1 |tee -a $LOG_PATH
$DSH "mkdir -p $H_DIR" 2>&1 |tee -a $LOG_PATH
echo -e "HDD=$HDD \nHDIR=${H_DIR}"
$DSH "cp -ru $SOURCE_DIR/${HADOOP_VERSION}-scratch/* $H_DIR/" 2>&1 |tee -a $LOG_PATH
vmstat="$HDD/aplic/vmstat_$PORT_PREFIX"
bwm="$HDD/aplic/bwm-ng_$PORT_PREFIX"
sar="$HDD/aplic/sar_$PORT_PREFIX"
$DSH "cp /usr/bin/vmstat $vmstat" 2>&1 |tee -a $LOG_PATH
$DSH "cp $bwm_source $bwm" 2>&1 |tee -a $LOG_PATH
$DSH "cp /usr/bin/sar $sar" 2>&1 |tee -a $LOG_PATH
logger "Preparing config"
$DSH "rm -rf $H_DIR/conf/*" 2>&1 |tee -a $LOG_PATH
MASTER="$host1"
IO_MB="$((IO_FACTOR * 10))"
if [ "$DISK" == "SSD" ] || [ "$DISK" == "HDD" ] ; then
HDFS_DIR="$HDD"
elif [ "$DISK" == "RL1" ] || [ "$DISK" == "RR1" ]; then
HDFS_DIR="/scratch/attached/1/hadoop-hibench_$PORT_PREFIX/hadoop"
elif [ "$DISK" == "RL2" ] || [ "$DISK" == "RR2" ]; then
HDFS_DIR="/scratch/attached/1/hadoop-hibench_$PORT_PREFIX/hadoop\,/scratch/attached/2/hadoop-hibench_$PORT_PREFIX/hadoop"
elif [ "$DISK" == "RL3" ] || [ "$DISK" == "RR3" ]; then
HDFS_DIR="/scratch/attached/1/hadoop-hibench_$PORT_PREFIX/hadoop\,/scratch/attached/2/hadoop-hibench_$PORT_PREFIX/hadoop\,/scratch/attached/3/hadoop-hibench_$PORT_PREFIX/hadoop"
else
echo "Incorrect disk specified2: $DISK"
exit 1
fi
MAX_REDS="$MAX_MAPS"
subs=$(cat <<EOF
s,##JAVA_HOME##,$JAVA_HOME,g;
s,##LOG_DIR##,$HDD/logs,g;
s,##REPLICATION##,$REPLICATION,g;
s,##MASTER##,$MASTER,g;
s,##NAMENODE##,$MASTER,g;
s,##TMP_DIR##,$HDD,g;
s,##HDFS_DIR##,$HDFS_DIR,g;
s,##MAX_MAPS##,$MAX_MAPS,g;
s,##MAX_REDS##,$MAX_REDS,g;
s,##IFACE##,$IFACE,g;
s,##IO_FACTOR##,$IO_FACTOR,g;
s,##IO_MB##,$IO_MB,g;
s,##PORT_PREFIX##,$PORT_PREFIX,g;
s,##IO_FILE##,$IO_FILE,g;
s,##BLOCK_SIZE##,$BLOCK_SIZE,g;
EOF
)
slaves=$(cat <<EOF
$host2
$host3
$host4
$host5
$host6
$host7
$host8
$host9
EOF
)
#to avoid perl warnings
export LC_CTYPE=en_US.UTF-8
export LC_ALL=en_US.UTF-8
$DSH "/usr/bin/perl -pe \"$subs\" $H_DIR/conf_template/hadoop-env.sh > $H_DIR/conf/hadoop-env.sh" 2>&1 |tee -a $LOG_PATH
$DSH "/usr/bin/perl -pe \"$subs\" $H_DIR/conf_template/core-site.xml > $H_DIR/conf/core-site.xml" 2>&1 |tee -a $LOG_PATH
$DSH "/usr/bin/perl -pe \"$subs\" $H_DIR/conf_template/hdfs-site.xml > $H_DIR/conf/hdfs-site.xml" 2>&1 |tee -a $LOG_PATH
$DSH "/usr/bin/perl -pe \"$subs\" $H_DIR/conf_template/mapred-site.xml > $H_DIR/conf/mapred-site.xml" 2>&1 |tee -a $LOG_PATH
logger "Replacing per host config"
for host_number in $(seq 1 "$NUMBER_OF_HOSTS") ; do
host_tmp="host${host_number}" #for variable variable name
ssh "${!host_tmp}" "/usr/bin/perl -pe \"s,##HOST##,${!host_tmp},g;\" $H_DIR/conf/mapred-site.xml > $H_DIR/conf/mapred-site.xml.tmp; rm $H_DIR/conf/mapred-site.xml; mv $H_DIR/conf/mapred-site.xml.tmp $H_DIR/conf/mapred-site.xml" 2>&1 |tee -a $LOG_PATH &
ssh "${!host_tmp}" "/usr/bin/perl -pe \"s,##HOST##,${!host_tmp},g;\" $H_DIR/conf/hdfs-site.xml > $H_DIR/conf/hdfs-site.xml.tmp; rm $H_DIR/conf/hdfs-site.xml; mv $H_DIR/conf/hdfs-site.xml.tmp $H_DIR/conf/hdfs-site.xml" 2>&1 |tee -a $LOG_PATH &
done
$DSH "echo -e \"$MASTER\" > $H_DIR/conf/masters" 2>&1 |tee -a $LOG_PATH
$DSH "echo -e \"$slaves\" > $H_DIR/conf/slaves" 2>&1 |tee -a $LOG_PATH
#save config
logger "Saving config"
create_conf_dirs=""
for host_number in $(seq 1 "$NUMBER_OF_HOSTS") ; do
host_tmp="host${host_number}" #for variable variable name
create_conf_dirs="$create_conf_dirs mkdir -p $JOB_PATH/conf_${!host_tmp} "
done
$DSH "$create_conf_dirs" 2>&1 |tee -a $LOG_PATH
for host_number in $(seq 1 "$NUMBER_OF_HOSTS") ; do
host_tmp="host${host_number}" #for variable variable name
ssh "${!host_tmp}" "cp $H_DIR/conf/* $JOB_PATH/conf_${!host_tmp}" 2>&1 |tee -a $LOG_PATH &
done
}
prepare_config ${NET} ${DISK} ${BENCH}
full_name="Not SET"
get_bench_name(){
if [ "$1" == "wordcount" ] ; then
full_name="Wordcount"
elif [ "$1" == "sort" ] ; then
full_name="Sort"
elif [ "$1" == "terasort" ] ; then
full_name="Terasort"
elif [ "$1" == "kmeans" ] ; then
full_name="KMeans"
elif [ "$1" == "pagerank" ] ; then
full_name="Pagerank"
elif [ "$1" == "bayes" ] ; then
full_name="Bayes"
elif [ "$1" == "hivebench" ] ; then
full_name="Hivebench"
elif [ "$1" == "dfsioe" ] ; then
full_name="DFSIOE"
else
full_name="INVALID"
fi
}
restart_hadoop(){
logger "Restart Hadoop"
#just in case stop all first
$DSH_MASTER $H_DIR/bin/stop-all.sh 2>&1 >> $LOG_PATH
#delete previous run logs
$DSH "rm -rf $HDD/logs; mkdir -p $HDD/logs" 2>&1 |tee -a $LOG_PATH
if [ "$DELETE_HDFS" == "1" ] ; then
logger "Deleting previous Hadoop HDFS"
#$DSH "rm -rf /scratch/attached/{1,2,3}/hadoop-hibench_$PORT_PREFIX/*" 2>&1 |tee -a $LOG_PATH
#$DSH "mkdir -p /scratch/attached/{1,2,3}/hadoop-hibench_$PORT_PREFIX/" 2>&1 |tee -a $LOG_PATH
$DSH "rm -rf $HDD/{dfs,mapred,logs}; mkdir -p $HDD/logs" 2>&1 |tee -a $LOG_PATH
#send multiple yes to format
$DSH_MASTER "yes Y | $H_DIR/bin/hadoop namenode -format" 2>&1 |tee -a $LOG_PATH
$DSH_MASTER "yes Y | $H_DIR/bin/hadoop datanode -format" 2>&1 |tee -a $LOG_PATH
fi
$DSH_MASTER $H_DIR/bin/start-all.sh 2>&1 |tee -a $LOG_PATH
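#poll dfsadmin until all datanodes report in and HDFS leaves safe mode
#(forces safe mode off at check 30, restarts Hadoop at check 60, resets HDFS at check 180, aborts at check 120 if datanodes are still missing)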
for i in {0..300} #3mins
do
local report=$($DSH_MASTER $H_DIR/bin/hadoop dfsadmin -report 2> /dev/null)
local num=$(echo "$report" | grep "Datanodes available" | awk '{print $3}')
local safe_mode=$(echo "$report" | grep "Safe mode is ON")
echo $report 2>&1 |tee -a $LOG_PATH
if [ "$num" == "$NUMBER_OF_DATA_NODES" ] ; then
if [[ -z $safe_mode ]] ; then
#everything fine continue
break
elif [ "$i" == "30" ] ; then
logger "Still in Safe mode, MANUALLY RESETTING SAFE MODE wating for $i seconds"
$DSH_MASTER $H_DIR/bin/hadoop dfsadmin -safemode leave 2>&1 |tee -a $LOG_PATH
else
logger "Still in Safe mode, wating for $i seconds"
fi
elif [ "$i" == "60" ] && [[ -z $1 ]] ; then
#try to restart hadoop deleting files and prepare again files
$DSH_MASTER $H_DIR/bin/stop-all.sh 2>&1 |tee -a $LOG_PATH
$DSH_MASTER $H_DIR/bin/start-all.sh 2>&1 |tee -a $LOG_PATH
elif [ "$i" == "180" ] && [[ -z $1 ]] ; then
#try to restart hadoop deleting files and prepare again files
logger "Reseting config to retry DELETE_HDFS WAS SET TO: $DELETE_HDFS"
DELETE_HDFS="1"
restart_hadoop no_retry
elif [ "$i" == "120" ] ; then
logger "$num/$NUMBER_OF_DATA_NODES Datanodes available, EXIT"
exit 1
else
logger "$num/$NUMBER_OF_DATA_NODES Datanodes available, wating for $i seconds"
sleep 1
fi
done
logger "Hadoop ready"
}
restart_monit(){
logger "Restart Monit"
stop_monit
$DSH "$vmstat -n 1 >> $HDD/vmstat-\$(hostname).log &" 2>&1 |tee -a $LOG_PATH
$DSH "$bwm -o csv -I bond0,eth0,eth1,eth2,eth3,ib0,ib1 -u bytes -t 1000 >> $HDD/bwm-\$(hostname).log &" 2>&1 |tee -a $LOG_PATH
$DSH "$sar -o $HDD/sar-\$(hostname).sar 1 >/dev/null 2>&1 &" 2>&1 |tee -a $LOG_PATH
logger "Monit ready"
}
stop_hadoop(){
logger "Stop Hadoop"
$DSH_MASTER $H_DIR/bin/stop-all.sh 2>&1 |tee -a $LOG_PATH
logger "Stop Hadoop ready"
}
stop_monit(){
logger "Stop monit"
$DSH "killall -9 $vmstat" #2>&1 |tee -a $LOG_PATH
$DSH "killall -9 $bwm" #2>&1 |tee -a $LOG_PATH
$DSH "killall -9 $sar" #2>&1 >> $LOG_PATH
logger "Stop monit ready"
}
save_bench() {
logger "Saving benchmark $1"
$DSH "mkdir -p $JOB_PATH/$1" 2>&1 |tee -a $LOG_PATH
$DSH "mv $HDD/{bwm,vmstat}*.log $HDD/sar*.sar $JOB_PATH/$1/" 2>&1 |tee -a $LOG_PATH
#we cannot move hadoop files
#take into account naming *.date when changing dates
#$DSH "cp $HDD/logs/hadoop-*.{log,out}* $JOB_PATH/$1/" 2>&1 |tee -a $LOG_PATH
$DSH "cp -r $HDD/logs/* $JOB_PATH/$1/" 2>&1 |tee -a $LOG_PATH
$DSH "cp $HDD/logs/job*.xml $JOB_PATH/$1/" 2>&1 |tee -a $LOG_PATH
#$DSH "cp $HADOOP_DIR/conf/* $JOB_PATH/$1" 2>&1 |tee -a $LOG_PATH
cp "${HIB_DIR}$bench/hibench.report" "$JOB_PATH/$1/"
logger "Copying files to master == scp -r $JOB_PATH ${host1}:$JOB_PATH"
#$DSH "scp -r $JOB_PATH ${host1}:$JOB_PATH" 2>&1 |tee -a $LOG_PATH
#pending, delete
logger "Compresing and deleting $1"
$DSH_MASTER "cd $JOB_PATH; tar -cjf $JOB_PATH/$1.tar.bz2 $1;" 2>&1 |tee -a $LOG_PATH
$DSH_MASTER "cd $JOB_PATH; tar -cjf $JOB_PATH/host_conf.tar.bz2 conf_*;" 2>&1 |tee -a $LOG_PATH
$DSH_MASTER "rm -rf $JOB_PATH/$1 $JOB_PATH/conf_*" 2>&1 |tee -a $LOG_PATH
#empty the contents from original disk TODO check if necessary still
$DSH "for i in $HDD/hadoop-*.{log,out}; do echo "" > $i; done;" 2>&1 |tee -a $LOG_PATH
logger "Done saving benchmark $1"
}
#before running hibench, set exports and vars
EXP="export JAVA_HOME=$JAVA_HOME && \
export HADOOP_HOME=$H_DIR && \
export COMPRESS_GLOBAL=$COMPRESS_GLOBAL && \
export COMPRESS_CODEC_GLOBAL=$COMPRESS_CODEC_GLOBAL && \
export NUM_MAPS=$MAX_MAPS && \
export NUM_REDS=$MAX_MAPS && \
"
execute_bench(){
#clear buffer cache except for prepare
# if [[ -z $3 ]] ; then
# logger "Clearing Buffer cache"
# $DSH "sudo /usr/local/sbin/drop_caches" 2>&1 |tee -a $LOG_PATH
# fi
logger "# Checking disk space with df BEFORE"
$DSH "df -h" 2>&1 |tee -a $LOG_PATH
logger "# Checking hadoop folder space BEFORE"
$DSH "du -sh $HDD/*" 2>&1 |tee -a $LOG_PATH
restart_monit
#TODO fix empty variable problem when not echoing
local start_exec=$(date '+%s') && echo "start $start_exec end $end_exec" 2>&1 |tee -a $LOG_PATH
local start_date=$(date --date='+1 hour' '+%Y%m%d%H%M%S') && echo "end $start_date" 2>&1 |tee -a $LOG_PATH
logger "# EXECUTING ${3}${1}"
$DSH_SLAVE "$EXP /usr/bin/time -f 'Time ${3}${1} %e' $2" 2>&1 |tee -a $LOG_PATH
local end_exec=$(date '+%s') && echo "start $start_exec end $end_exec" 2>&1 |tee -a $LOG_PATH
logger "# DONE EXECUTING $1"
local total_secs=$(expr $end_exec - $start_exec) && echo "end total sec $total_secs" 2>&1 |tee -a $LOG_PATH
url="http://minerva.bsc.es:8099/zabbix/screens.php?&fullscreen=0&elementid=AZ&stime=${start_date}&period=${total_secs}"
echo "SENDING: hibench.runs $end_exec <a href='$url'>${3}${1} $CONF</a> <strong>Time:</strong> $total_secs s." 2>&1 |tee -a $LOG_PATH
zabbix_sender "hibench.runs $end_exec <a href='$url'>${3}${1} $CONF</a> <strong>Time:</strong> $total_secs s."
#save the prepare
if [[ -z $3 ]] && [ "$SAVE_BENCH" == "1" ] ; then
logger "Saving $3 to disk"
$DSH_MASTER $H_DIR/bin/hadoop fs -get -ignoreCrc /HiBench $SAVE_LOCATION 2>&1 |tee -a $LOG_PATH
fi
stop_monit
logger "# Checking disk space with df AFTER"
$DSH "df -h" 2>&1 |tee -a $LOG_PATH
logger "# Checking hadoop folder space AFTER"
$DSH "du -sh $HDD/*" 2>&1 |tee -a $LOG_PATH
save_bench "${3}${1}"
}
start_time=$(date '+%s')
########################################################
logger "Starting execution of HiBench"
##PREPARED="/scratch/local/ssd/pristine/prepared"
#"wordcount" "sort" "terasort" "kmeans" "pagerank" "bayes" "nutchindexing" "hivebench" "dfsioe"
# "nutchindexing"
for bench in $(echo "$LIST_BENCHS")
do
restart_hadoop
#Delete previous data
#$DSH_MASTER "${H_DIR}/bin/hadoop fs -rmr /HiBench" 2>&1 |tee -a $LOG_PATH
echo "" > "${HIB_DIR}$bench/hibench.report"
#just in case check if the input file exists in hadoop
if [ "$DELETE_HDFS" == "0" ] ; then
get_bench_name $bench
input_exists=$($DSH_MASTER $H_DIR/bin/hadoop fs -ls "/HiBench/$full_name/Input" 2> /dev/null |grep "Found ")
if [ "$input_exists" != "" ] ; then
logger "Input folder seems OK"
else
logger "Input folder does not exist, RESET and RESTART"
$DSH_MASTER $H_DIR/bin/hadoop fs -ls "/HiBench/$full_name/Input" 2>&1 |tee -a $LOG_PATH
DELETE_HDFS=1
restart_hadoop
fi
fi
echo "# $(date +"%H:%M:%S") STARTING $bench" 2>&1 |tee -a $LOG_PATH
##mkdir -p "$PREPARED/$bench"
#if [ ! -f "$PREPARED/${i}.tbza" ] ; then
#hive leaves tmp config files
#if [ "$bench" != "hivebench" ] ; then
# $DSH_MASTER "rm /tmp/hive* /tmp/pristine/hive*" 2>&1 |tee -a $LOG_PATH
#fi
if [ "$DELETE_HDFS" == "1" ] ; then
if [ "$bench" != "dfsioe" ] ; then
execute_bench $bench ${HIB_DIR}$bench/bin/prepare.sh "prep_"
elif [ "$bench" == "dfsioe" ] ; then
execute_bench $bench ${HIB_DIR}$bench/bin/prepare-read.sh "prep_"
fi
else
logger "Reusing previous RUN prepared $bench"
fi
#if [ "$bench" = "wordcounta" ] ; then
# echo "# $(date +"%H:%M:%S") SAVING PREPARED DATA for $bench"
#
# $DIR/bin/hadoop fs -get /HiBench $PREPARED/$bench/
# tar -cjf $PREPARED/${i}.tbz $PREPARED/$bench/
# rm -rf $PREPARED/$bench
#fi
#else
# echo "# $(date +"%H:%M:%S") RESTORING PREPARED DATA for $bench"
# tar -xjf $PREPARED/${i}.tbz $PREPARED/
# $HADOOPDIR/bin/hadoop fs -put $PREPARED/HiBench /HiBench
# rm -rf $PREPARED/HiBench
#fi
logger "$(date +"%H:%M:%S") RUNNING $bench"
if [ "$bench" != "hivebench" ] && [ "$bench" != "dfsioe" ] ; then
execute_bench $bench ${HIB_DIR}$bench/bin/run.sh
elif [ "$bench" == "hivebench" ] ; then
execute_bench hivebench_aggregation ${HIB_DIR}hivebench/bin/run-aggregation.sh
execute_bench hivebench_join ${HIB_DIR}hivebench/bin/run-join.sh
elif [ "$bench" == "dfsioe" ] ; then
execute_bench dfsioe_read ${HIB_DIR}dfsioe/bin/run-read.sh
execute_bench dfsioe_write ${HIB_DIR}dfsioe/bin/run-write.sh
fi
done
logger "$(date +"%H:%M:%S") DONE $bench"
#clean output data
get_bench_name $bench
$DSH_MASTER "${H_DIR}/bin/hadoop fs -rmr /HiBench/$full_name/Output"
########################################################
end_time=$(date '+%s')
#clean up
stop_hadoop
stop_monit
#copy
$DSH "cp $HDD/* $JOB_PATH/"
#report
#finish_date=`$DATE`
total_time=`expr $end_time - $start_time`
#$(touch ${JOB_PATH}/finish_${finish_date})
#$(touch ${JOB_PATH}/total_${total_time})
du -h $JOB_PATH|tail -n 1
logger "DONE, total time $total_time seconds. Path $JOB_PATH"
|
Aloja/aloja
|
shell/OLD/run_al-03.sh
|
Shell
|
apache-2.0
| 22,144 |
/*
* Copyright 2013-2014 Dario Manesku. All rights reserved.
* License: https://github.com/bkaradzic/bgfx/blob/master/LICENSE
*/
#include "common.sh"
uniform vec4 u_params0;
uniform vec4 u_params1;
uniform vec4 u_params2;
uniform vec4 u_color;
uniform vec4 u_materialKa;
uniform vec4 u_materialKd;
uniform vec4 u_materialKs;
uniform vec4 u_lightPosition;
uniform vec4 u_lightAmbientPower;
uniform vec4 u_lightDiffusePower;
uniform vec4 u_lightSpecularPower;
uniform vec4 u_lightSpotDirectionInner;
uniform vec4 u_lightAttenuationSpotOuter;
uniform vec4 u_smSamplingParams;
uniform vec4 u_csmFarDistances;
#if SM_OMNI
uniform vec4 u_tetraNormalGreen;
uniform vec4 u_tetraNormalYellow;
uniform vec4 u_tetraNormalBlue;
uniform vec4 u_tetraNormalRed;
#endif
SAMPLER2D(s_shadowMap0, 4);
SAMPLER2D(s_shadowMap1, 5);
SAMPLER2D(s_shadowMap2, 6);
SAMPLER2D(s_shadowMap3, 7);
struct Shader
{
vec3 ambi;
vec3 diff;
vec3 spec;
};
Shader evalShader(float _diff, float _spec)
{
Shader shader;
shader.ambi = u_lightAmbientPower.xyz * u_lightAmbientPower.w * u_materialKa.xyz;
shader.diff = u_lightDiffusePower.xyz * u_lightDiffusePower.w * u_materialKd.xyz * _diff;
shader.spec = u_lightSpecularPower.xyz * u_lightSpecularPower.w * u_materialKs.xyz * _spec;
return shader;
}
float computeVisibility(sampler2D _sampler
, vec4 _shadowCoord
, float _bias
, vec4 _samplingParams
, vec2 _texelSize
, float _depthMultiplier
, float _minVariance
, float _hardness
)
{
float visibility;
#if SM_LINEAR
vec4 shadowcoord = vec4(_shadowCoord.xy / _shadowCoord.w, _shadowCoord.z, 1.0);
#else
vec4 shadowcoord = _shadowCoord;
#endif
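	// Pick the shadow test selected at compile time: hard, PCF, VSM or ESM.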
#if SM_HARD
visibility = hardShadow(_sampler, shadowcoord, _bias);
#elif SM_PCF
visibility = PCF(_sampler, shadowcoord, _bias, _samplingParams, _texelSize);
#elif SM_VSM
visibility = VSM(_sampler, shadowcoord, _bias, _depthMultiplier, _minVariance);
#elif SM_ESM
visibility = ESM(_sampler, shadowcoord, _bias, _depthMultiplier * _hardness);
#endif
return visibility;
}
|
LWJGL-CI/bgfx
|
examples/16-shadowmaps/fs_shadowmaps_color_lighting.sh
|
Shell
|
bsd-2-clause
| 2,065 |
sudo gem install cocoapods --pre;
pod repo update;
pod trunk push VirgilCrypto.podspec;
|
VirgilSecurity/VirgilCryptoiOS
|
CI/publish-cocoapods.sh
|
Shell
|
bsd-3-clause
| 88 |
#!/bin/bash -e
IMG_FILE="${STAGE_WORK_DIR}/${IMG_DATE}-${IMG_NAME}${IMG_SUFFIX}.img"
NOOBS_DIR="${STAGE_WORK_DIR}/${IMG_DATE}-${IMG_NAME}${IMG_SUFFIX}"
unmount_image ${IMG_FILE}
mkdir -p ${STAGE_WORK_DIR}
cp ${WORK_DIR}/export-image/${IMG_DATE}-${IMG_NAME}${IMG_SUFFIX}.img ${STAGE_WORK_DIR}/
rm -rf ${STAGE_WORK_DIR}/${IMG_DATE}-${IMG_NAME}${IMG_SUFFIX}
PARTED_OUT=$(parted -s ${IMG_FILE} unit b print)
BOOT_OFFSET=$(echo "$PARTED_OUT" | grep -e '^ 1'| xargs echo -n \
| cut -d" " -f 2 | tr -d B)
BOOT_LENGTH=$(echo "$PARTED_OUT" | grep -e '^ 1'| xargs echo -n \
| cut -d" " -f 4 | tr -d B)
ROOT_OFFSET=$(echo "$PARTED_OUT" | grep -e '^ 2'| xargs echo -n \
| cut -d" " -f 2 | tr -d B)
ROOT_LENGTH=$(echo "$PARTED_OUT" | grep -e '^ 2'| xargs echo -n \
| cut -d" " -f 4 | tr -d B)
BOOT_DEV=$(losetup --show -f -o ${BOOT_OFFSET} --sizelimit ${BOOT_LENGTH} ${IMG_FILE})
ROOT_DEV=$(losetup --show -f -o ${ROOT_OFFSET} --sizelimit ${ROOT_LENGTH} ${IMG_FILE})
echo "/boot: offset $BOOT_OFFSET, length $BOOT_LENGTH"
echo "/: offset $ROOT_OFFSET, length $ROOT_LENGTH"
mkdir -p ${STAGE_WORK_DIR}/rootfs
mkdir -p ${NOOBS_DIR}
mount $ROOT_DEV ${STAGE_WORK_DIR}/rootfs
mount $BOOT_DEV ${STAGE_WORK_DIR}/rootfs/boot
ln -sv "/lib/systemd/system/apply_noobs_os_config.service" "$ROOTFS_DIR/etc/systemd/system/multi-user.target.wants/apply_noobs_os_config.service"
bsdtar --numeric-owner --format gnutar --use-compress-program pxz -C ${STAGE_WORK_DIR}/rootfs/boot -cpf ${NOOBS_DIR}/boot.tar.xz .
umount ${STAGE_WORK_DIR}/rootfs/boot
bsdtar --numeric-owner --format gnutar --use-compress-program pxz -C ${STAGE_WORK_DIR}/rootfs --one-file-system -cpf ${NOOBS_DIR}/root.tar.xz .
unmount_image ${IMG_FILE}
|
cfstras/pi-gen
|
export-noobs/prerun.sh
|
Shell
|
bsd-3-clause
| 1,702 |
#!/usr/bin/sh
export PATH=$(pwd)/node_modules/.bin:$PATH
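# Run the mocha suite under istanbul, write TAP results to results.tap, then emit a Cobertura XML coverage report.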
istanbul cover ./node_modules/mocha/bin/_mocha -- -u exports -R tap 'test/**/*tests.js' > results.tap
istanbul report cobertura --root coverage
|
openAgile/CommitStream.Web
|
src/app/test-coverage.sh
|
Shell
|
bsd-3-clause
| 202 |
#!/usr/bin/env bash
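# Arguments (inferred from their use below): $1 server name, $2 document root,
# $3 HTTP port (default 80), $4 HTTPS port (default 443), $5 associative array of extra fastcgi_param entries.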
declare -A params=$5 # Create an associative array
paramsTXT=""
if [ -n "$5" ]; then
for element in "${!params[@]}"
do
paramsTXT="${paramsTXT}
fastcgi_param ${element} ${params[$element]};"
done
fi
block="server {
listen ${3:-80};
listen ${4:-443} ssl http2;
server_name $1;
root \"$2\";
index index.html index.htm index.php app_dev.php;
charset utf-8;
location / {
try_files \$uri \$uri/ /app_dev.php?\$query_string;
}
location = /favicon.ico { access_log off; log_not_found off; }
location = /robots.txt { access_log off; log_not_found off; }
access_log off;
error_log /var/log/nginx/$1-ssl-error.log error;
sendfile off;
client_max_body_size 100m;
# DEV
location ~ ^/(app_dev|app_test|config)\.php(/|\$) {
fastcgi_split_path_info ^(.+\.php)(/.+)\$;
fastcgi_pass unix:/var/run/php/php7.1-fpm.sock;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
$paramsTXT
fastcgi_intercept_errors off;
fastcgi_buffer_size 16k;
fastcgi_buffers 4 16k;
}
# PROD
location ~ ^/app\.php(/|$) {
fastcgi_split_path_info ^(.+\.php)(/.+)$;
fastcgi_pass unix:/var/run/php/php7.1-fpm.sock;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
$paramsTXT
fastcgi_intercept_errors off;
fastcgi_buffer_size 16k;
fastcgi_buffers 4 16k;
internal;
}
location ~ /\.ht {
deny all;
}
ssl_certificate /etc/nginx/ssl/$1.crt;
ssl_certificate_key /etc/nginx/ssl/$1.key;
}
"
echo "$block" > "/etc/nginx/sites-available/$1"
ln -fs "/etc/nginx/sites-available/$1" "/etc/nginx/sites-enabled/$1"
|
Mike-he/sandbox-Workerman-RPC
|
vendor/laravel/homestead/scripts/serve-symfony2.sh
|
Shell
|
mit
| 1,857 |
#!/bin/sh
TC="/usr/sbin/tc"
IPTABLES="/usr/sbin/iptables"
NVRAM="/bin/config"
ECHO="/bin/echo"
WAN_IF="$($NVRAM get wan_ifnames)"
WAN_PROTO="$($NVRAM get wan_proto)"
FILTER_ADD="$TC filter add dev $WAN_IF"
UPRATE="$($NVRAM get qos_uprate)"
QoS_ENABLE="$($NVRAM get qos_endis_on)"
BANDCTL="$($NVRAM get qos_threshold)"
WAN_SPEED=`cat /tmp/WAN_status | cut -f 1 -d 'M'`
start(){
if [ "x$QoS_ENABLE" != "x1" ]; then
dni_qos --MFS "0:$BANDCTL"
return
fi
if [ "x$WAN_PROTO" = "xpptp" ]; then
if [ "x$BANDCTL" = "x0" ] || [ $UPRATE -le 0 ] || [ $UPRATE -gt 1000000 ]; then
UPRATE=1000000
fi
elif [ "x$WAN_PROTO" = "xpppoe" ]; then
if [ "x$BANDCTL" = "x0" ] || [ $UPRATE -le 0 ] || [ $UPRATE -gt 1000000 ]; then
UPRATE=1000000
fi
else
if [ "x$BANDCTL" = "x0" ] || [ $UPRATE -le 0 ] || [ $UPRATE -gt 1000000 ]; then
UPRATE=1000000
fi
fi
dni_qos --MFS "$UPRATE:$BANDCTL"
}
stop(){
dni_qos --MFS "0:$BANDCTL"
}
status(){
$IPTABLES -t mangle -nvL
}
case "$1" in
stop)
stop
;;
start | restart )
stop
start
;;
status)
status
;;
*)
echo $"Usage:$0 {start|stop|restart|status}"
exit 1
esac
|
jameshilliard/WM2500RP-V1.0.0.34_gpl_src
|
target/linux/x6200/base-files/sbin/qos.sh
|
Shell
|
gpl-2.0
| 1,142 |
#!/bin/sh
#
# Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
## some tests require path to find test source dir
if [ "${TESTSRC}" = "" ]
then
TESTSRC=${PWD}
echo "TESTSRC not set. Using "${TESTSRC}" as default"
fi
echo "TESTSRC=${TESTSRC}"
## Adding common setup Variables for running shell tests.
. ${TESTSRC}/../../../test_env.sh
${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -Xinternalversion | sed 's/amd64/x86/' | grep "x86" | grep "Server VM" | grep "debug"
# Only test fastdebug Server VM on x86
if [ $? != 0 ]
then
echo "Test Passed"
exit 0
fi
# grep for support integer multiply vectors (cpu with SSE4.1)
${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -XX:+PrintMiscellaneous -XX:+Verbose -version | grep "cores per cpu" | grep "sse4.1"
if [ $? != 0 ]
then
SSE=2
else
SSE=4
fi
cp ${TESTSRC}${FS}TestIntVect.java .
${COMPILEJAVA}${FS}bin${FS}javac ${TESTJAVACOPTS} -d . TestIntVect.java
${TESTJAVA}${FS}bin${FS}java ${TESTOPTS} -Xbatch -XX:-TieredCompilation -XX:CICompilerCount=1 -XX:+PrintCompilation -XX:+TraceNewVectors TestIntVect > test.out 2>&1
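# The checks below count the vector nodes reported by -XX:+TraceNewVectors and fail when fewer than expected appear.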
COUNT=`grep AddVI test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 4 ]
then
echo "Test Failed: AddVI $COUNT < 4"
exit 1
fi
# AddVI is generated for test_subc
COUNT=`grep SubVI test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 4 ]
then
echo "Test Failed: SubVI $COUNT < 4"
exit 1
fi
# MulVI is only supported with SSE4.1.
if [ $SSE -gt 3 ]
then
# LShiftVI+SubVI is generated for test_mulc
COUNT=`grep MulVI test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 2 ]
then
echo "Test Failed: MulVI $COUNT < 2"
exit 1
fi
fi
COUNT=`grep AndV test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 3 ]
then
echo "Test Failed: AndV $COUNT < 3"
exit 1
fi
COUNT=`grep OrV test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 3 ]
then
echo "Test Failed: OrV $COUNT < 3"
exit 1
fi
COUNT=`grep XorV test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 3 ]
then
echo "Test Failed: XorV $COUNT < 3"
exit 1
fi
# LShiftVI+SubVI is generated for test_mulc
COUNT=`grep LShiftVI test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 5 ]
then
echo "Test Failed: LShiftVI $COUNT < 5"
exit 1
fi
COUNT=`grep RShiftVI test.out | sed '/URShiftVI/d' | wc -l | awk '{print $1}'`
if [ $COUNT -lt 3 ]
then
echo "Test Failed: RShiftVI $COUNT < 3"
exit 1
fi
COUNT=`grep URShiftVI test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 3 ]
then
echo "Test Failed: URShiftVI $COUNT < 3"
exit 1
fi
|
netroby/jdk9-shenandoah-hotspot
|
test/compiler/c2/7200264/Test7200264.sh
|
Shell
|
gpl-2.0
| 3,515 |
#!/bin/bash
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# $Id$
# Based on the am-wrapper.sh script (sys-devel/automake-wrapper-1-r1)
#
# Executes the correct fox-config version, based on the value of WANT_FOX.
# All versions of fox after 1.0.x ship with a fox-config script
#
#
# Stable branches first, in descending order, then unstable branches.
# After a new stable branch, prepend the new version and bump (or remove)
# the last unstable branch
#
vers="1.4 1.2 1.5"
bindir=/usr/bin
if [ "${0##*/}" = "fox-wrapper.sh" ] ; then
echo "fox-wrapper: Don't call this script directly, use fox-config instead" >&2
exit 1
fi
if [ -z "${WANT_FOX}" ] ; then
echo "fox-wrapper: Set the WANT_FOX variable to the desired version of fox, e.g.:" >&2
echo " WANT_FOX=\"1.2\" fox-config $@"
exit 1
fi
for v in ${vers} ; do
eval binary_${v/./_}="fox-${v}-config"
done
#
# Check the WANT_FOX setting
#
for v in ${vers} x ; do
if [ "${v}" = "x" ] ; then
echo "fox-wrapper: WANT_FOX was set to an invalid version ${WANT_FOX}" >&2
echo " Valid values of WANT_FOX are: ${vers// /, }"
exit 1
fi
if [ "${WANT_FOX}" = "${v}" ] ; then
binary="binary_${v/./_}"
binary="${!binary}"
break
fi
done
if [ "${WANT_FOXWRAPPER_DEBUG}" ] ; then
echo "fox-wrapper: DEBUG: WANT_FOX is set to ${WANT_FOX}" >&2
echo "fox-wrapper: DEBUG: will execute <$binary>" >&2
fi
#
# for further consistency
#
for v in ${vers} ; do
mybin="binary_${v/./_}"
if [ "${binary}" = "${!mybin}" ] ; then
export WANT_FOX="${v}"
fi
done
#
# Now try to run the binary
#
if [ ! -x "${bindir}/${binary}" ] ; then
echo "fox-wrapper: $binary is missing or not executable." >&2
echo " Please try emerging the correct version of fox, i.e.:" >&2
echo " emerge '=x11-libs/${binary/-config/}*'" >&2
exit 1
fi
"$binary" "$@"
|
DmitriyHetman/gentoo
|
x11-libs/fox-wrapper/files/fox-wrapper-1.sh
|
Shell
|
gpl-3.0
| 1,908 |
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
cd fuzzing
make oss-fuzz
|
googlefonts/oss-fuzz
|
projects/uwebsockets/build.sh
|
Shell
|
apache-2.0
| 685 |
#!/bin/bash
##########################################################################
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012
# Vincent C. Passaro ([email protected])
# Shannon Mitchell ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
##########################################################################
###################### Fotis Networks LLC ###############################
# By Vincent C. Passaro #
# Fotis Networks LLC #
# Vincent[.]Passaro[@]fotisnetworks[.]com #
# www.fotisnetworks.com #
###################### Fotis Networks LLC ###############################
#
# _____________________________________________________________________
# | Version | Change Information | Author | Date |
# |__________|_______________________|____________________|____________|
# | 1.0 | Initial Script | Vincent C. Passaro | 1-Aug-2012 |
# | | Creation | | |
# |__________|_______________________|____________________|____________|
#
#######################DISA INFORMATION##################################
# Group ID (Vulid): V-23741
# Group Title: GEN003601
# Rule ID: SV-37594r1_rule
# Severity: medium
# Rule Version (STIG-ID): GEN003601
# Rule Title: TCP backlog queue sizes must be set appropriately.
#
# Vulnerability Discussion: To provide some mitigation to TCP Denial of
# Service attacks, the TCP backlog queue sizes must be set to at least 1280
# or in accordance with product-specific guidelines.
#
# Responsibility: System Administrator
# IAControls: ECSC-1
#
# Check Content:
#
# # cat /proc/sys/net/ipv4/tcp_max_syn_backlog
# If the result is not 1280 or greater, this is a finding.
#
# Fix Text:
#
# Edit /etc/sysctl.conf and add a setting for
# "net.ipv4.tcp_max_syn_backlog=1280".
# Procedure:
# sysctl -p
#######################DISA INFORMATION##################################
# Global Variables
PDI=GEN003601
BACKLOG=$( cat /proc/sys/net/ipv4/tcp_max_syn_backlog )
#Start-Lockdown
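# Remediate only when the current backlog is below the required 1280: append the
# setting to /etc/sysctl.conf and reload the kernel parameters.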
if [ $BACKLOG -lt 1280 ]
then
echo " " >> /etc/sysctl.conf
echo "#Added by STIG check $PDI" >> /etc/sysctl.conf
echo "net.ipv4.tcp_max_syn_backlog=1280" >> /etc/sysctl.conf
sysctl -p > /dev/null
fi
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5/prod/GEN003601.sh
|
Shell
|
apache-2.0
| 3,275 |
#!/usr/bin/env bash
${IDRIS:-idris} $@ --quiet --port none --nocolour interpret003.idr < input.in
rm -f *.ibc
|
kojiromike/Idris-dev
|
test/interpret003/run.sh
|
Shell
|
bsd-3-clause
| 110 |
#!/bin/sh
# $FreeBSD$
. `dirname $0`/conf.sh
echo "1..1"
ddbs=2048
nblocks1=1024
nblocks2=`expr $nblocks1 / \( $ddbs / 512 \)`
src=`mktemp $base.XXXXXX` || exit 1
dst=`mktemp $base.XXXXXX` || exit 1
us0=$(attach_md -t malloc -s $(expr $nblocks1 + 1)) || exit 1
us1=$(attach_md -t malloc -s $(expr $nblocks1 + 1)) || exit 1
us2=$(attach_md -t malloc -s $(expr $nblocks1 + 1)) || exit 1
dd if=/dev/random of=${src} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
graid3 label $name /dev/${us0} /dev/${us1} /dev/${us2} || exit 1
devwait
#
# Writing without DATA component and rebuild of DATA component.
#
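# Drop component 1, zero it out, write fresh data through the degraded array,
# re-insert the component so it rebuilds, then verify a read matches the source.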
graid3 remove -n 1 $name
dd if=/dev/zero of=/dev/${us1} bs=512 count=`expr $nblocks1 + 1` >/dev/null 2>&1
dd if=${src} of=/dev/raid3/${name} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
graid3 insert -n 1 $name md${us1}
sleep 1
dd if=/dev/raid3/${name} of=${dst} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
if [ `md5 -q ${src}` != `md5 -q ${dst}` ]; then
echo "not ok 1"
else
echo "ok 1"
fi
rm -f ${src} ${dst}
|
TigerBSD/TigerBSD
|
FreeBSD/tests/sys/geom/class/raid3/8_test.sh
|
Shell
|
isc
| 1,010 |
#!/bin/bash
set -o errexit -o nounset -o xtrace
# Quick start:
# wget -O - https://raw.github.com/snoyberg/keter/master/setup-keter.sh | bash -ex
sudo apt-get update
sudo apt-get install postgresql haskell-platform -y
cabal update
cabal install keter --force-reinstalls
sudo mkdir -p /opt/keter/bin
sudo cp ~/.cabal/bin/keter /opt/keter/bin
sudo mkdir -p /opt/keter/etc
cat > /tmp/keter-config.yaml <<EOF
# Directory containing incoming folder, where to store logs, etc. Relative to
# the config file directory.
root: ..
# Keter can listen on multiple ports for incoming connections. These ports can
# have HTTPS either enabled or disabled.
listeners:
# HTTP
- host: "*4" # Listen on all IPv4 hosts
#port: 80 # Could be used to modify port
# HTTPS
- host: "*4"
#port: 443
key: key.pem
certificate: certificate.pem
# User to run applications as
# setuid: ubuntu
# Get the user's IP address from x-forwarded-for. Useful when sitting behind a
# load balancer like Amazon ELB.
# ip-from-header: true
EOF
sudo chown root:root /tmp/keter-config.yaml
sudo mv /tmp/keter-config.yaml /opt/keter/etc
cat > /tmp/keter.conf <<EOF
# /etc/init/keter.conf
start on (net-device-up and local-filesystems and runlevel [2345])
stop on runlevel [016]
respawn
console none
exec /opt/keter/bin/keter /opt/keter/etc/keter-config.yaml
EOF
sudo chown root:root /tmp/keter.conf
sudo mv /tmp/keter.conf /etc/init
sudo start keter
sudo mkdir -p /opt/keter/incoming
sudo chown "$USER" /opt/keter/incoming
|
telser/keter
|
setup-keter.sh
|
Shell
|
mit
| 1,530 |
#!/bin/sh
test_description='fetch/clone from a shallow clone'
. ./test-lib.sh
commit() {
echo "$1" >tracked &&
git add tracked &&
git commit -m "$1"
}
test_expect_success 'setup' '
commit 1 &&
commit 2 &&
commit 3 &&
commit 4 &&
git config --global transfer.fsckObjects true
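# transfer.fsckObjects makes every fetch/clone below fail if a corrupt or missing object is transferred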
'
test_expect_success 'setup shallow clone' '
git clone --no-local --depth=2 .git shallow &&
git --git-dir=shallow/.git log --format=%s >actual &&
cat <<EOF >expect &&
4
3
EOF
test_cmp expect actual
'
test_expect_success 'clone from shallow clone' '
git clone --no-local shallow shallow2 &&
(
cd shallow2 &&
git fsck &&
git log --format=%s >actual &&
cat <<EOF >expect &&
4
3
EOF
test_cmp expect actual
)
'
test_expect_success 'fetch from shallow clone' '
(
cd shallow &&
commit 5
) &&
(
cd shallow2 &&
git fetch &&
git fsck &&
git log --format=%s origin/master >actual &&
cat <<EOF >expect &&
5
4
3
EOF
test_cmp expect actual
)
'
test_expect_success 'fetch --depth from shallow clone' '
(
cd shallow &&
commit 6
) &&
(
cd shallow2 &&
git fetch --depth=2 &&
git fsck &&
git log --format=%s origin/master >actual &&
cat <<EOF >expect &&
6
5
EOF
test_cmp expect actual
)
'
test_expect_success 'fetch --unshallow from shallow clone' '
(
cd shallow2 &&
git fetch --unshallow &&
git fsck &&
git log --format=%s origin/master >actual &&
cat <<EOF >expect &&
6
5
4
3
EOF
test_cmp expect actual
)
'
test_expect_success 'fetch something upstream has but hidden by clients shallow boundaries' '
# the blob "1" is available in .git but hidden by the
# shallow2/.git/shallow and it should be resent
! git --git-dir=shallow2/.git cat-file blob `echo 1|git hash-object --stdin` >/dev/null &&
echo 1 >1.t &&
git add 1.t &&
git commit -m add-1-back &&
(
cd shallow2 &&
git fetch ../.git +refs/heads/master:refs/remotes/top/master &&
git fsck &&
git log --format=%s top/master >actual &&
cat <<EOF >expect &&
add-1-back
4
3
EOF
test_cmp expect actual
) &&
git --git-dir=shallow2/.git cat-file blob `echo 1|git hash-object --stdin` >/dev/null
'
test_expect_success 'fetch that requires changes in .git/shallow is filtered' '
(
cd shallow &&
git checkout --orphan no-shallow &&
commit no-shallow
) &&
git init notshallow &&
(
cd notshallow &&
git fetch ../shallow/.git refs/heads/*:refs/remotes/shallow/* &&
git for-each-ref --format="%(refname)" >actual.refs &&
cat <<EOF >expect.refs &&
refs/remotes/shallow/no-shallow
EOF
test_cmp expect.refs actual.refs &&
git log --format=%s shallow/no-shallow >actual &&
cat <<EOF >expect &&
no-shallow
EOF
test_cmp expect actual
)
'
test_expect_success 'fetch --update-shallow' '
(
cd shallow &&
git checkout master &&
commit 7 &&
git tag -m foo heavy-tag HEAD^ &&
git tag light-tag HEAD^:tracked
) &&
(
cd notshallow &&
git fetch --update-shallow ../shallow/.git refs/heads/*:refs/remotes/shallow/* &&
git fsck &&
git for-each-ref --sort=refname --format="%(refname)" >actual.refs &&
cat <<EOF >expect.refs &&
refs/remotes/shallow/master
refs/remotes/shallow/no-shallow
refs/tags/heavy-tag
refs/tags/light-tag
EOF
test_cmp expect.refs actual.refs &&
git log --format=%s shallow/master >actual &&
cat <<EOF >expect &&
7
6
5
4
3
EOF
test_cmp expect actual
)
'
test_done
|
suryasingh/git
|
t/t5537-fetch-shallow.sh
|
Shell
|
gpl-2.0
| 3,280 |
#!/bin/sh
#
# Copyright (c) 2006 Junio C Hamano
#
test_description='git grep various.
'
. ./test-lib.sh
cat >hello.c <<EOF
#include <assert.h>
#include <stdio.h>
int main(int argc, const char **argv)
{
printf("Hello world.\n");
return 0;
/* char ?? */
}
EOF
test_expect_success setup '
{
echo foo mmap bar
echo foo_mmap bar
echo foo_mmap bar mmap
echo foo mmap bar_mmap
echo foo_mmap bar mmap baz
} >file &&
{
echo Hello world
echo HeLLo world
echo Hello_world
echo HeLLo_world
} >hello_world &&
{
echo "a+b*c"
echo "a+bc"
echo "abc"
} >ab &&
{
echo d &&
echo 0
} >d0 &&
echo vvv >v &&
echo ww w >w &&
echo x x xx x >x &&
echo y yy >y &&
echo zzz > z &&
mkdir t &&
echo test >t/t &&
echo vvv >t/v &&
mkdir t/a &&
echo vvv >t/a/v &&
{
echo "line without leading space1"
echo " line with leading space1"
echo " line with leading space2"
echo " line with leading space3"
echo "line without leading space2"
} >space &&
cat >hello.ps1 <<-\EOF &&
# No-op.
function dummy() {}
# Say hello.
function hello() {
echo "Hello world."
} # hello
# Still a no-op.
function dummy() {}
EOF
git add . &&
test_tick &&
git commit -m initial
'
test_expect_success 'grep should not segfault with a bad input' '
test_must_fail git grep "("
'
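# Exercise each case twice: once against HEAD and once against the working tree.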
for H in HEAD ''
do
case "$H" in
HEAD) HC='HEAD:' L='HEAD' ;;
'') HC= L='in working tree' ;;
esac
test_expect_success "grep -w $L" '
{
echo ${HC}file:1:foo mmap bar
echo ${HC}file:3:foo_mmap bar mmap
echo ${HC}file:4:foo mmap bar_mmap
echo ${HC}file:5:foo_mmap bar mmap baz
} >expected &&
git -c grep.linenumber=false grep -n -w -e mmap $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L" '
{
echo ${HC}file:1:foo mmap bar
echo ${HC}file:3:foo_mmap bar mmap
echo ${HC}file:4:foo mmap bar_mmap
echo ${HC}file:5:foo_mmap bar mmap baz
} >expected &&
git -c grep.linenumber=true grep -w -e mmap $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L" '
{
echo ${HC}file:foo mmap bar
echo ${HC}file:foo_mmap bar mmap
echo ${HC}file:foo mmap bar_mmap
echo ${HC}file:foo_mmap bar mmap baz
} >expected &&
git -c grep.linenumber=true grep --no-line-number -w -e mmap $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L (w)" '
: >expected &&
test_must_fail git grep -n -w -e "^w" $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L (x)" '
{
echo ${HC}x:1:x x xx x
} >expected &&
git grep -n -w -e "x xx* x" $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L (y-1)" '
{
echo ${HC}y:1:y yy
} >expected &&
git grep -n -w -e "^y" $H >actual &&
test_cmp expected actual
'
test_expect_success "grep -w $L (y-2)" '
: >expected &&
if git grep -n -w -e "^y y" $H >actual
then
echo should not have matched
cat actual
false
else
test_cmp expected actual
fi
'
test_expect_success "grep -w $L (z)" '
: >expected &&
if git grep -n -w -e "^z" $H >actual
then
echo should not have matched
cat actual
false
else
test_cmp expected actual
fi
'
test_expect_success "grep $L (t-1)" '
echo "${HC}t/t:1:test" >expected &&
git grep -n -e test $H >actual &&
test_cmp expected actual
'
test_expect_success "grep $L (t-2)" '
echo "${HC}t:1:test" >expected &&
(
cd t &&
git grep -n -e test $H
) >actual &&
test_cmp expected actual
'
test_expect_success "grep $L (t-3)" '
echo "${HC}t/t:1:test" >expected &&
(
cd t &&
git grep --full-name -n -e test $H
) >actual &&
test_cmp expected actual
'
test_expect_success "grep -c $L (no /dev/null)" '
! git grep -c test $H | grep /dev/null
'
test_expect_success "grep --max-depth -1 $L" '
{
echo ${HC}t/a/v:1:vvv
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth -1 -n -e vvv $H >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 $L" '
{
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 -- '*' $L" '
{
echo ${HC}t/a/v:1:vvv
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H -- "*" >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 1 $L" '
{
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 1 -n -e vvv $H >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 -- t $L" '
{
echo ${HC}t/v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H -- t >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 -- . t $L" '
{
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H -- . t >actual &&
test_cmp expected actual
'
test_expect_success "grep --max-depth 0 -- t . $L" '
{
echo ${HC}t/v:1:vvv
echo ${HC}v:1:vvv
} >expected &&
git grep --max-depth 0 -n -e vvv $H -- t . >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.extendedRegexp=false" '
echo "${HC}ab:a+bc" >expected &&
git -c grep.extendedRegexp=false grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.extendedRegexp=true" '
echo "${HC}ab:abc" >expected &&
git -c grep.extendedRegexp=true grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patterntype=basic" '
echo "${HC}ab:a+bc" >expected &&
git -c grep.patterntype=basic grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patterntype=extended" '
echo "${HC}ab:abc" >expected &&
git -c grep.patterntype=extended grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patterntype=fixed" '
echo "${HC}ab:a+b*c" >expected &&
git -c grep.patterntype=fixed grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success PCRE "grep $L with grep.patterntype=perl" '
echo "${HC}ab:a+b*c" >expected &&
git -c grep.patterntype=perl grep "a\x{2b}b\x{2a}c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success !PCRE "grep $L with grep.patterntype=perl errors without PCRE" '
test_must_fail git -c grep.patterntype=perl grep "foo.*bar"
'
test_expect_success "grep $L with grep.patternType=default and grep.extendedRegexp=true" '
echo "${HC}ab:abc" >expected &&
git \
-c grep.patternType=default \
-c grep.extendedRegexp=true \
grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.extendedRegexp=true and grep.patternType=default" '
echo "${HC}ab:abc" >expected &&
git \
-c grep.extendedRegexp=true \
-c grep.patternType=default \
grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patternType=extended and grep.extendedRegexp=false" '
echo "${HC}ab:abc" >expected &&
git \
-c grep.patternType=extended \
-c grep.extendedRegexp=false \
grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.patternType=basic and grep.extendedRegexp=true" '
echo "${HC}ab:a+bc" >expected &&
git \
-c grep.patternType=basic \
-c grep.extendedRegexp=true \
grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.extendedRegexp=false and grep.patternType=extended" '
echo "${HC}ab:abc" >expected &&
git \
-c grep.extendedRegexp=false \
-c grep.patternType=extended \
grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep $L with grep.extendedRegexp=true and grep.patternType=basic" '
echo "${HC}ab:a+bc" >expected &&
git \
-c grep.extendedRegexp=true \
-c grep.patternType=basic \
grep "a+b*c" $H ab >actual &&
test_cmp expected actual
'
test_expect_success "grep --count $L" '
echo ${HC}ab:3 >expected &&
git grep --count -e b $H -- ab >actual &&
test_cmp expected actual
'
test_expect_success "grep --count -h $L" '
echo 3 >expected &&
git grep --count -h -e b $H -- ab >actual &&
test_cmp expected actual
'
done
cat >expected <<EOF
file
EOF
test_expect_success 'grep -l -C' '
git grep -l -C1 foo >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:5
EOF
test_expect_success 'grep -c -C' '
git grep -c -C1 foo >actual &&
test_cmp expected actual
'
test_expect_success 'grep -L -C' '
git ls-files >expected &&
git grep -L -C1 nonexistent_string >actual &&
test_cmp expected actual
'
test_expect_success 'grep --files-without-match --quiet' '
git grep --files-without-match --quiet nonexistent_string >actual &&
test_cmp /dev/null actual
'
cat >expected <<EOF
file:foo mmap bar_mmap
EOF
test_expect_success 'grep -e A --and -e B' '
git grep -e "foo mmap" --and -e bar_mmap >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo_mmap bar mmap
file:foo_mmap bar mmap baz
EOF
test_expect_success 'grep ( -e A --or -e B ) --and -e B' '
git grep \( -e foo_ --or -e baz \) \
--and -e " mmap" >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo mmap bar
EOF
test_expect_success 'grep -e A --and --not -e B' '
git grep -e "foo mmap" --and --not -e bar_mmap >actual &&
test_cmp expected actual
'
test_expect_success 'grep should ignore GREP_OPTIONS' '
GREP_OPTIONS=-v git grep " mmap bar\$" >actual &&
test_cmp expected actual
'
test_expect_success 'grep -f, non-existent file' '
test_must_fail git grep -f patterns
'
cat >expected <<EOF
file:foo mmap bar
file:foo_mmap bar
file:foo_mmap bar mmap
file:foo mmap bar_mmap
file:foo_mmap bar mmap baz
EOF
cat >pattern <<EOF
mmap
EOF
test_expect_success 'grep -f, one pattern' '
git grep -f pattern >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo mmap bar
file:foo_mmap bar
file:foo_mmap bar mmap
file:foo mmap bar_mmap
file:foo_mmap bar mmap baz
t/a/v:vvv
t/v:vvv
v:vvv
EOF
cat >patterns <<EOF
mmap
vvv
EOF
test_expect_success 'grep -f, multiple patterns' '
git grep -f patterns >actual &&
test_cmp expected actual
'
test_expect_success 'grep, multiple patterns' '
git grep "$(cat patterns)" >actual &&
test_cmp expected actual
'
cat >expected <<EOF
file:foo mmap bar
file:foo_mmap bar
file:foo_mmap bar mmap
file:foo mmap bar_mmap
file:foo_mmap bar mmap baz
t/a/v:vvv
t/v:vvv
v:vvv
EOF
cat >patterns <<EOF
mmap
vvv
EOF
test_expect_success 'grep -f, ignore empty lines' '
git grep -f patterns >actual &&
test_cmp expected actual
'
test_expect_success 'grep -f, ignore empty lines, read patterns from stdin' '
git grep -f - <patterns >actual &&
test_cmp expected actual
'
cat >expected <<EOF
y:y yy
--
z:zzz
EOF
test_expect_success 'grep -q, silently report matches' '
>empty &&
git grep -q mmap >actual &&
test_cmp empty actual &&
test_must_fail git grep -q qfwfq >actual &&
test_cmp empty actual
'
test_expect_success 'grep -C1 hunk mark between files' '
git grep -C1 "^[yz]" >actual &&
test_cmp expected actual
'
test_expect_success 'log grep setup' '
echo a >>file &&
test_tick &&
GIT_AUTHOR_NAME="With * Asterisk" \
GIT_AUTHOR_EMAIL="[email protected]" \
git commit -a -m "second" &&
echo a >>file &&
test_tick &&
git commit -a -m "third" &&
echo a >>file &&
test_tick &&
GIT_AUTHOR_NAME="Night Fall" \
GIT_AUTHOR_EMAIL="[email protected]" \
git commit -a -m "fourth"
'
test_expect_success 'log grep (1)' '
git log --author=author --pretty=tformat:%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (2)' '
git log --author=" * " -F --pretty=tformat:%s >actual &&
{
echo second
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (3)' '
git log --author="^A U" --pretty=tformat:%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (4)' '
git log --author="frotz\.com>$" --pretty=tformat:%s >actual &&
{
echo second
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (5)' '
git log --author=Thor -F --pretty=tformat:%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (6)' '
git log --author=-0700 --pretty=tformat:%s >actual &&
>expect &&
test_cmp expect actual
'
test_expect_success 'log grep (7)' '
git log -g --grep-reflog="commit: third" --pretty=tformat:%s >actual &&
echo third >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (8)' '
git log -g --grep-reflog="commit: third" --grep-reflog="commit: second" --pretty=tformat:%s >actual &&
{
echo third && echo second
} >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (9)' '
git log -g --grep-reflog="commit: third" --author="Thor" --pretty=tformat:%s >actual &&
echo third >expect &&
test_cmp expect actual
'
test_expect_success 'log grep (9)' '
git log -g --grep-reflog="commit: third" --author="non-existent" --pretty=tformat:%s >actual &&
: >expect &&
test_cmp expect actual
'
test_expect_success 'log --grep-reflog can only be used under -g' '
test_must_fail git log --grep-reflog="commit: third"
'
test_expect_success 'log with multiple --grep uses union' '
git log --grep=i --grep=r --format=%s >actual &&
{
echo fourth && echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --all-match with multiple --grep uses intersection' '
git log --all-match --grep=i --grep=r --format=%s >actual &&
{
echo third
} >expect &&
test_cmp expect actual
'
test_expect_success 'log with multiple --author uses union' '
git log --author="Thor" --author="Aster" --format=%s >actual &&
{
echo third && echo second && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --all-match with multiple --author still uses union' '
git log --all-match --author="Thor" --author="Aster" --format=%s >actual &&
{
echo third && echo second && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --grep --author uses intersection' '
# grep matches only third and fourth
# author matches only initial and third
git log --author="A U Thor" --grep=r --format=%s >actual &&
{
echo third
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --grep --grep --author takes union of greps and intersects with author' '
# grep matches initial and second but not third
# author matches only initial and third
git log --author="A U Thor" --grep=s --grep=l --format=%s >actual &&
{
echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --all-match --grep --author --author still takes union of authors and intersects with grep' '
# grep matches only initial and third
# author matches all but second
git log --all-match --author="Thor" --author="Night" --grep=i --format=%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --grep --author --author takes union of authors and intersects with grep' '
# grep matches only initial and third
# author matches all but second
git log --author="Thor" --author="Night" --grep=i --format=%s >actual &&
{
echo third && echo initial
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --all-match --grep --grep --author takes intersection' '
# grep matches only third
# author matches only initial and third
git log --all-match --author="A U Thor" --grep=i --grep=r --format=%s >actual &&
{
echo third
} >expect &&
test_cmp expect actual
'
test_expect_success 'log --author does not search in timestamp' '
: >expect &&
git log --author="$GIT_AUTHOR_DATE" >actual &&
test_cmp expect actual
'
test_expect_success 'log --committer does not search in timestamp' '
: >expect &&
git log --committer="$GIT_COMMITTER_DATE" >actual &&
test_cmp expect actual
'
test_expect_success 'grep with CE_VALID file' '
git update-index --assume-unchanged t/t &&
rm t/t &&
test "$(git grep test)" = "t/t:test" &&
git update-index --no-assume-unchanged t/t &&
git checkout t/t
'
cat >expected <<EOF
hello.c=#include <stdio.h>
hello.c: return 0;
EOF
test_expect_success 'grep -p with userdiff' '
git config diff.custom.funcname "^#" &&
echo "hello.c diff=custom" >.gitattributes &&
git grep -p return >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c=int main(int argc, const char **argv)
hello.c: return 0;
EOF
test_expect_success 'grep -p' '
rm -f .gitattributes &&
git grep -p return >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c-#include <stdio.h>
hello.c-
hello.c=int main(int argc, const char **argv)
hello.c-{
hello.c- printf("Hello world.\n");
hello.c: return 0;
EOF
test_expect_success 'grep -p -B5' '
git grep -p -B5 return >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c=int main(int argc, const char **argv)
hello.c-{
hello.c- printf("Hello world.\n");
hello.c: return 0;
hello.c- /* char ?? */
hello.c-}
EOF
test_expect_success 'grep -W' '
git grep -W return >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c-#include <assert.h>
hello.c:#include <stdio.h>
EOF
test_expect_success 'grep -W shows no trailing empty lines' '
git grep -W stdio >actual &&
test_cmp expected actual
'
test_expect_success 'grep -W with userdiff' '
test_when_finished "rm -f .gitattributes" &&
git config diff.custom.xfuncname "^function .*$" &&
echo "hello.ps1 diff=custom" >.gitattributes &&
git grep -W echo >function-context-userdiff-actual
'
test_expect_success ' includes preceding comment' '
grep "# Say hello" function-context-userdiff-actual
'
test_expect_success ' includes function line' '
grep "=function hello" function-context-userdiff-actual
'
test_expect_success ' includes matching line' '
grep ": echo" function-context-userdiff-actual
'
test_expect_success ' includes last line of the function' '
grep "} # hello" function-context-userdiff-actual
'
for threads in $(test_seq 0 10)
do
test_expect_success "grep --threads=$threads & -c grep.threads=$threads" "
git grep --threads=$threads . >actual.$threads &&
if test $threads -ge 1
then
test_cmp actual.\$(($threads - 1)) actual.$threads
fi &&
git -c grep.threads=$threads grep . >actual.$threads &&
if test $threads -ge 1
then
test_cmp actual.\$(($threads - 1)) actual.$threads
fi
"
done
test_expect_success !PTHREADS,C_LOCALE_OUTPUT 'grep --threads=N or grep.threads=N warns when no pthreads' '
git grep --threads=2 Hello hello_world 2>err &&
grep ^warning: err >warnings &&
test_line_count = 1 warnings &&
grep -F "no threads support, ignoring --threads" err &&
git -c grep.threads=2 grep Hello hello_world 2>err &&
grep ^warning: err >warnings &&
test_line_count = 1 warnings &&
grep -F "no threads support, ignoring grep.threads" err &&
git -c grep.threads=2 grep --threads=4 Hello hello_world 2>err &&
grep ^warning: err >warnings &&
test_line_count = 2 warnings &&
grep -F "no threads support, ignoring --threads" err &&
grep -F "no threads support, ignoring grep.threads" err &&
git -c grep.threads=0 grep --threads=0 Hello hello_world 2>err &&
test_line_count = 0 err
'
test_expect_success 'grep from a subdirectory to search wider area (1)' '
mkdir -p s &&
(
cd s && git grep "x x x" ..
)
'
test_expect_success 'grep from a subdirectory to search wider area (2)' '
mkdir -p s &&
(
cd s || exit 1
( git grep xxyyzz .. >out ; echo $? >status )
! test -s out &&
test 1 = $(cat status)
)
'
cat >expected <<EOF
hello.c:int main(int argc, const char **argv)
EOF
test_expect_success 'grep -Fi' '
git grep -Fi "CHAR *" >actual &&
test_cmp expected actual
'
test_expect_success 'outside of git repository' '
rm -fr non &&
mkdir -p non/git/sub &&
echo hello >non/git/file1 &&
echo world >non/git/sub/file2 &&
{
echo file1:hello &&
echo sub/file2:world
} >non/expect.full &&
echo file2:world >non/expect.sub &&
(
GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git grep o &&
git grep --no-index o >../actual.full &&
test_cmp ../expect.full ../actual.full &&
cd sub &&
test_must_fail git grep o &&
git grep --no-index o >../../actual.sub &&
test_cmp ../../expect.sub ../../actual.sub
) &&
echo ".*o*" >non/git/.gitignore &&
(
GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git grep o &&
git grep --no-index --exclude-standard o >../actual.full &&
test_cmp ../expect.full ../actual.full &&
{
echo ".gitignore:.*o*" &&
cat ../expect.full
} >../expect.with.ignored &&
git grep --no-index --no-exclude o >../actual.full &&
test_cmp ../expect.with.ignored ../actual.full
)
'
test_expect_success 'outside of git repository with fallbackToNoIndex' '
rm -fr non &&
mkdir -p non/git/sub &&
echo hello >non/git/file1 &&
echo world >non/git/sub/file2 &&
cat <<-\EOF >non/expect.full &&
file1:hello
sub/file2:world
EOF
echo file2:world >non/expect.sub &&
(
GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git -c grep.fallbackToNoIndex=false grep o &&
git -c grep.fallbackToNoIndex=true grep o >../actual.full &&
test_cmp ../expect.full ../actual.full &&
cd sub &&
test_must_fail git -c grep.fallbackToNoIndex=false grep o &&
git -c grep.fallbackToNoIndex=true grep o >../../actual.sub &&
test_cmp ../../expect.sub ../../actual.sub
) &&
echo ".*o*" >non/git/.gitignore &&
(
GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git -c grep.fallbackToNoIndex=false grep o &&
git -c grep.fallbackToNoIndex=true grep --exclude-standard o >../actual.full &&
test_cmp ../expect.full ../actual.full &&
{
echo ".gitignore:.*o*" &&
cat ../expect.full
} >../expect.with.ignored &&
git -c grep.fallbackToNoIndex grep --no-exclude o >../actual.full &&
test_cmp ../expect.with.ignored ../actual.full
)
'
test_expect_success 'inside git repository but with --no-index' '
rm -fr is &&
mkdir -p is/git/sub &&
echo hello >is/git/file1 &&
echo world >is/git/sub/file2 &&
echo ".*o*" >is/git/.gitignore &&
{
echo file1:hello &&
echo sub/file2:world
} >is/expect.unignored &&
{
echo ".gitignore:.*o*" &&
cat is/expect.unignored
} >is/expect.full &&
: >is/expect.empty &&
echo file2:world >is/expect.sub &&
(
cd is/git &&
git init &&
test_must_fail git grep o >../actual.full &&
test_cmp ../expect.empty ../actual.full &&
git grep --untracked o >../actual.unignored &&
test_cmp ../expect.unignored ../actual.unignored &&
git grep --no-index o >../actual.full &&
test_cmp ../expect.full ../actual.full &&
git grep --no-index --exclude-standard o >../actual.unignored &&
test_cmp ../expect.unignored ../actual.unignored &&
cd sub &&
test_must_fail git grep o >../../actual.sub &&
test_cmp ../../expect.empty ../../actual.sub &&
git grep --no-index o >../../actual.sub &&
test_cmp ../../expect.sub ../../actual.sub &&
git grep --untracked o >../../actual.sub &&
test_cmp ../../expect.sub ../../actual.sub
)
'
test_expect_success 'grep --no-index descends into repos, but not .git' '
rm -fr non &&
mkdir -p non/git &&
(
GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
echo magic >file &&
git init repo &&
(
cd repo &&
echo magic >file &&
git add file &&
git commit -m foo &&
echo magic >.git/file
) &&
cat >expect <<-\EOF &&
file
repo/file
EOF
git grep -l --no-index magic >actual &&
test_cmp expect actual
)
'
test_expect_success 'setup double-dash tests' '
cat >double-dash <<EOF &&
--
->
other
EOF
git add double-dash
'
cat >expected <<EOF
double-dash:->
EOF
test_expect_success 'grep -- pattern' '
git grep -- "->" >actual &&
test_cmp expected actual
'
test_expect_success 'grep -- pattern -- pathspec' '
git grep -- "->" -- double-dash >actual &&
test_cmp expected actual
'
test_expect_success 'grep -e pattern -- path' '
git grep -e "->" -- double-dash >actual &&
test_cmp expected actual
'
cat >expected <<EOF
double-dash:--
EOF
test_expect_success 'grep -e -- -- path' '
git grep -e -- -- double-dash >actual &&
test_cmp expected actual
'
test_expect_success 'dashdash disambiguates rev as rev' '
test_when_finished "rm -f master" &&
echo content >master &&
echo master:hello.c >expect &&
git grep -l o master -- hello.c >actual &&
test_cmp expect actual
'
test_expect_success 'dashdash disambiguates pathspec as pathspec' '
test_when_finished "git rm -f master" &&
echo content >master &&
git add master &&
echo master:content >expect &&
git grep o -- master >actual &&
test_cmp expect actual
'
test_expect_success 'report bogus arg without dashdash' '
test_must_fail git grep o does-not-exist
'
test_expect_success 'report bogus rev with dashdash' '
test_must_fail git grep o hello.c --
'
test_expect_success 'allow non-existent path with dashdash' '
# We need a real match so grep exits with success.
tree=$(git ls-tree HEAD |
sed s/hello.c/not-in-working-tree/ |
git mktree) &&
git grep o "$tree" -- not-in-working-tree
'
test_expect_success 'grep --no-index pattern -- path' '
rm -fr non &&
mkdir -p non/git &&
(
GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
echo hello >hello &&
echo goodbye >goodbye &&
echo hello:hello >expect &&
git grep --no-index o -- hello >actual &&
test_cmp expect actual
)
'
test_expect_success 'grep --no-index complains of revs' '
test_must_fail git grep --no-index o master -- 2>err &&
test_i18ngrep "cannot be used with revs" err
'
test_expect_success 'grep --no-index prefers paths to revs' '
test_when_finished "rm -f master" &&
echo content >master &&
echo master:content >expect &&
git grep --no-index o master >actual &&
test_cmp expect actual
'
test_expect_success 'grep --no-index does not "diagnose" revs' '
test_must_fail git grep --no-index o :1:hello.c 2>err &&
test_i18ngrep ! -i "did you mean" err
'
cat >expected <<EOF
hello.c:int main(int argc, const char **argv)
hello.c: printf("Hello world.\n");
EOF
test_expect_success PCRE 'grep --perl-regexp pattern' '
git grep --perl-regexp "\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp expected actual
'
test_expect_success !PCRE 'grep --perl-regexp pattern errors without PCRE' '
test_must_fail git grep --perl-regexp "foo.*bar"
'
test_expect_success PCRE 'grep -P pattern' '
git grep -P "\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp expected actual
'
test_expect_success LIBPCRE2 "grep -P with (*NO_JIT) doesn't error out" '
git grep -P "(*NO_JIT)\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp expected actual
'
test_expect_success !PCRE 'grep -P pattern errors without PCRE' '
test_must_fail git grep -P "foo.*bar"
'
test_expect_success 'grep pattern with grep.extendedRegexp=true' '
>empty &&
test_must_fail git -c grep.extendedregexp=true \
grep "\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp empty actual
'
test_expect_success PCRE 'grep -P pattern with grep.extendedRegexp=true' '
git -c grep.extendedregexp=true \
grep -P "\p{Ps}.*?\p{Pe}" hello.c >actual &&
test_cmp expected actual
'
test_expect_success PCRE 'grep -P -v pattern' '
{
echo "ab:a+b*c"
echo "ab:a+bc"
} >expected &&
git grep -P -v "abc" ab >actual &&
test_cmp expected actual
'
test_expect_success PCRE 'grep -P -i pattern' '
cat >expected <<-EOF &&
hello.c: printf("Hello world.\n");
EOF
git grep -P -i "PRINTF\([^\d]+\)" hello.c >actual &&
test_cmp expected actual
'
test_expect_success PCRE 'grep -P -w pattern' '
{
echo "hello_world:Hello world"
echo "hello_world:HeLLo world"
} >expected &&
git grep -P -w "He((?i)ll)o" hello_world >actual &&
test_cmp expected actual
'
test_expect_success PCRE 'grep -P backreferences work (the PCRE NO_AUTO_CAPTURE flag is not set)' '
git grep -P -h "(?P<one>.)(?P=one)" hello_world >actual &&
test_cmp hello_world actual &&
git grep -P -h "(.)\1" hello_world >actual &&
test_cmp hello_world actual
'
test_expect_success 'grep -G invalidpattern properly dies ' '
test_must_fail git grep -G "a["
'
test_expect_success 'grep invalidpattern properly dies with grep.patternType=basic' '
test_must_fail git -c grep.patterntype=basic grep "a["
'
test_expect_success 'grep -E invalidpattern properly dies ' '
test_must_fail git grep -E "a["
'
test_expect_success 'grep invalidpattern properly dies with grep.patternType=extended' '
test_must_fail git -c grep.patterntype=extended grep "a["
'
test_expect_success PCRE 'grep -P invalidpattern properly dies ' '
test_must_fail git grep -P "a["
'
test_expect_success PCRE 'grep invalidpattern properly dies with grep.patternType=perl' '
test_must_fail git -c grep.patterntype=perl grep "a["
'
test_expect_success 'grep -G -E -F pattern' '
echo "ab:a+b*c" >expected &&
git grep -G -E -F "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep pattern with grep.patternType=basic, =extended, =fixed' '
echo "ab:a+b*c" >expected &&
git \
-c grep.patterntype=basic \
-c grep.patterntype=extended \
-c grep.patterntype=fixed \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -E -F -G pattern' '
echo "ab:a+bc" >expected &&
git grep -E -F -G "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep pattern with grep.patternType=extended, =fixed, =basic' '
echo "ab:a+bc" >expected &&
git \
-c grep.patterntype=extended \
-c grep.patterntype=fixed \
-c grep.patterntype=basic \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -F -G -E pattern' '
echo "ab:abc" >expected &&
git grep -F -G -E "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep pattern with grep.patternType=fixed, =basic, =extended' '
echo "ab:abc" >expected &&
git \
-c grep.patterntype=fixed \
-c grep.patterntype=basic \
-c grep.patterntype=extended \
grep "a+b*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -G -F -P -E pattern' '
echo "d0:d" >expected &&
git grep -G -F -P -E "[\d]" d0 >actual &&
test_cmp expected actual
'
test_expect_success 'grep pattern with grep.patternType=fixed, =basic, =perl, =extended' '
echo "d0:d" >expected &&
git \
-c grep.patterntype=fixed \
-c grep.patterntype=basic \
-c grep.patterntype=perl \
-c grep.patterntype=extended \
grep "[\d]" d0 >actual &&
test_cmp expected actual
'
test_expect_success PCRE 'grep -G -F -E -P pattern' '
echo "d0:0" >expected &&
git grep -G -F -E -P "[\d]" d0 >actual &&
test_cmp expected actual
'
test_expect_success PCRE 'grep pattern with grep.patternType=fixed, =basic, =extended, =perl' '
echo "d0:0" >expected &&
git \
-c grep.patterntype=fixed \
-c grep.patterntype=basic \
-c grep.patterntype=extended \
-c grep.patterntype=perl \
grep "[\d]" d0 >actual &&
test_cmp expected actual
'
test_expect_success PCRE 'grep -P pattern with grep.patternType=fixed' '
echo "ab:a+b*c" >expected &&
git \
-c grep.patterntype=fixed \
grep -P "a\x{2b}b\x{2a}c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -F pattern with grep.patternType=basic' '
echo "ab:a+b*c" >expected &&
git \
-c grep.patterntype=basic \
grep -F "*c" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -G pattern with grep.patternType=fixed' '
{
echo "ab:a+b*c"
echo "ab:a+bc"
} >expected &&
git \
-c grep.patterntype=fixed \
grep -G "a+b" ab >actual &&
test_cmp expected actual
'
test_expect_success 'grep -E pattern with grep.patternType=fixed' '
{
echo "ab:a+b*c"
echo "ab:a+bc"
echo "ab:abc"
} >expected &&
git \
-c grep.patterntype=fixed \
grep -E "a+" ab >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c<RED>:<RESET>int main(int argc, const char **argv)
hello.c<RED>-<RESET>{
<RED>--<RESET>
hello.c<RED>:<RESET> /* char ?? */
hello.c<RED>-<RESET>}
<RED>--<RESET>
hello_world<RED>:<RESET>Hello_world
hello_world<RED>-<RESET>HeLLo_world
EOF
test_expect_success 'grep --color, separator' '
test_config color.grep.context normal &&
test_config color.grep.filename normal &&
test_config color.grep.function normal &&
test_config color.grep.linenumber normal &&
test_config color.grep.match normal &&
test_config color.grep.selected normal &&
test_config color.grep.separator red &&
git grep --color=always -A1 -e char -e lo_w hello.c hello_world |
test_decode_color >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c:int main(int argc, const char **argv)
hello.c: /* char ?? */
hello_world:Hello_world
EOF
test_expect_success 'grep --break' '
git grep --break -e char -e lo_w hello.c hello_world >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c:int main(int argc, const char **argv)
hello.c-{
--
hello.c: /* char ?? */
hello.c-}
hello_world:Hello_world
hello_world-HeLLo_world
EOF
test_expect_success 'grep --break with context' '
git grep --break -A1 -e char -e lo_w hello.c hello_world >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c
int main(int argc, const char **argv)
/* char ?? */
hello_world
Hello_world
EOF
test_expect_success 'grep --heading' '
git grep --heading -e char -e lo_w hello.c hello_world >actual &&
test_cmp expected actual
'
cat >expected <<EOF
<BOLD;GREEN>hello.c<RESET>
4:int main(int argc, const <BLACK;BYELLOW>char<RESET> **argv)
8: /* <BLACK;BYELLOW>char<RESET> ?? */
<BOLD;GREEN>hello_world<RESET>
3:Hel<BLACK;BYELLOW>lo_w<RESET>orld
EOF
test_expect_success 'mimic ack-grep --group' '
test_config color.grep.context normal &&
test_config color.grep.filename "bold green" &&
test_config color.grep.function normal &&
test_config color.grep.linenumber normal &&
test_config color.grep.match "black yellow" &&
test_config color.grep.selected normal &&
test_config color.grep.separator normal &&
git grep --break --heading -n --color \
-e char -e lo_w hello.c hello_world |
test_decode_color >actual &&
test_cmp expected actual
'
cat >expected <<EOF
space: line with leading space1
space: line with leading space2
space: line with leading space3
EOF
test_expect_success PCRE 'grep -E "^ "' '
git grep -E "^ " space >actual &&
test_cmp expected actual
'
test_expect_success PCRE 'grep -P "^ "' '
git grep -P "^ " space >actual &&
test_cmp expected actual
'
cat >expected <<EOF
space-line without leading space1
space: line <RED>with <RESET>leading space1
space: line <RED>with <RESET>leading <RED>space2<RESET>
space: line <RED>with <RESET>leading space3
space:line without leading <RED>space2<RESET>
EOF
test_expect_success 'grep --color -e A -e B with context' '
test_config color.grep.context normal &&
test_config color.grep.filename normal &&
test_config color.grep.function normal &&
test_config color.grep.linenumber normal &&
test_config color.grep.matchContext normal &&
test_config color.grep.matchSelected red &&
test_config color.grep.selected normal &&
test_config color.grep.separator normal &&
git grep --color=always -C2 -e "with " -e space2 space |
test_decode_color >actual &&
test_cmp expected actual
'
cat >expected <<EOF
space-line without leading space1
space- line with leading space1
space: line <RED>with <RESET>leading <RED>space2<RESET>
space- line with leading space3
space-line without leading space2
EOF
test_expect_success 'grep --color -e A --and -e B with context' '
test_config color.grep.context normal &&
test_config color.grep.filename normal &&
test_config color.grep.function normal &&
test_config color.grep.linenumber normal &&
test_config color.grep.matchContext normal &&
test_config color.grep.matchSelected red &&
test_config color.grep.selected normal &&
test_config color.grep.separator normal &&
git grep --color=always -C2 -e "with " --and -e space2 space |
test_decode_color >actual &&
test_cmp expected actual
'
cat >expected <<EOF
space-line without leading space1
space: line <RED>with <RESET>leading space1
space- line with leading space2
space: line <RED>with <RESET>leading space3
space-line without leading space2
EOF
test_expect_success 'grep --color -e A --and --not -e B with context' '
test_config color.grep.context normal &&
test_config color.grep.filename normal &&
test_config color.grep.function normal &&
test_config color.grep.linenumber normal &&
test_config color.grep.matchContext normal &&
test_config color.grep.matchSelected red &&
test_config color.grep.selected normal &&
test_config color.grep.separator normal &&
git grep --color=always -C2 -e "with " --and --not -e space2 space |
test_decode_color >actual &&
test_cmp expected actual
'
cat >expected <<EOF
hello.c-
hello.c=int main(int argc, const char **argv)
hello.c-{
hello.c: pr<RED>int<RESET>f("<RED>Hello<RESET> world.\n");
hello.c- return 0;
hello.c- /* char ?? */
hello.c-}
EOF
test_expect_success 'grep --color -e A --and -e B -p with context' '
test_config color.grep.context normal &&
test_config color.grep.filename normal &&
test_config color.grep.function normal &&
test_config color.grep.linenumber normal &&
test_config color.grep.matchContext normal &&
test_config color.grep.matchSelected red &&
test_config color.grep.selected normal &&
test_config color.grep.separator normal &&
git grep --color=always -p -C3 -e int --and -e Hello --no-index hello.c |
test_decode_color >actual &&
test_cmp expected actual
'
test_expect_success 'grep can find things only in the work tree' '
: >work-tree-only &&
git add work-tree-only &&
test_when_finished "git rm -f work-tree-only" &&
echo "find in work tree" >work-tree-only &&
git grep --quiet "find in work tree" &&
test_must_fail git grep --quiet --cached "find in work tree" &&
test_must_fail git grep --quiet "find in work tree" HEAD
'
test_expect_success 'grep can find things only in the work tree (i-t-a)' '
echo "intend to add this" >intend-to-add &&
git add -N intend-to-add &&
test_when_finished "git rm -f intend-to-add" &&
git grep --quiet "intend to add this" &&
test_must_fail git grep --quiet --cached "intend to add this" &&
test_must_fail git grep --quiet "intend to add this" HEAD
'
test_expect_success 'grep does not search work tree with assume unchanged' '
echo "intend to add this" >intend-to-add &&
git add -N intend-to-add &&
git update-index --assume-unchanged intend-to-add &&
test_when_finished "git rm -f intend-to-add" &&
test_must_fail git grep --quiet "intend to add this" &&
test_must_fail git grep --quiet --cached "intend to add this" &&
test_must_fail git grep --quiet "intend to add this" HEAD
'
test_expect_success 'grep can find things only in the index' '
echo "only in the index" >cache-this &&
git add cache-this &&
rm cache-this &&
test_when_finished "git rm --cached cache-this" &&
test_must_fail git grep --quiet "only in the index" &&
git grep --quiet --cached "only in the index" &&
test_must_fail git grep --quiet "only in the index" HEAD
'
test_expect_success 'grep does not report i-t-a with -L --cached' '
echo "intend to add this" >intend-to-add &&
git add -N intend-to-add &&
test_when_finished "git rm -f intend-to-add" &&
git ls-files | grep -v "^intend-to-add\$" >expected &&
git grep -L --cached "nonexistent_string" >actual &&
test_cmp expected actual
'
test_expect_success 'grep does not report i-t-a and assume unchanged with -L' '
echo "intend to add this" >intend-to-add-assume-unchanged &&
git add -N intend-to-add-assume-unchanged &&
test_when_finished "git rm -f intend-to-add-assume-unchanged" &&
git update-index --assume-unchanged intend-to-add-assume-unchanged &&
git ls-files | grep -v "^intend-to-add-assume-unchanged\$" >expected &&
git grep -L "nonexistent_string" >actual &&
test_cmp expected actual
'
test_done
|
Ikke/git
|
t/t7810-grep.sh
|
Shell
|
gpl-2.0
| 40,221 |
#!/bin/bash
###########################################################################
# update_ts_files.sh
# ---------------------
# Date : July 2007
# Copyright : (C) 2007 by Tim Sutton
# Email : tim at linfiniti dot com
###########################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
###########################################################################
# Update the translation files with strings used in QGIS
# 1. create a clean Qt .pro file for the project
# 2. run lupdate using the .pro file from step 1
# 3. remove the .pro
# Note the .pro file must NOT be named qgis.pro as this
# name is reserved for the Windows qmake project file
echo "deprecated - use push_ts.sh and pull_ts.sh" >&2
set -e
cleanup() {
if [ -f i18n/qgis_ts.tar ]; then
echo Restoring excluded translations
tar -xf i18n/qgis_ts.tar
fi
echo Removing temporary files
perl -i.bak -ne 'print unless /^\s+<location.*(python-i18n|_texts)\.cpp.*$/;' i18n/qgis_*.ts
for i in \
python/python-i18n.{ts,cpp} \
python/plugins/*/python-i18n.{ts,cpp} \
i18n/qgis_*.ts.bak \
src/plugins/grass/grasslabels-i18n.cpp \
i18n/qgis_ts.tar \
qgis_ts.pro
do
[ -f "$i" ] && rm "$i"
done
for i in \
src/plugins/plugin_template/plugingui.cpp \
src/plugins/plugin_template/plugin.cpp
do
[ -f "$i.save" ] && mv "$i.save" "$i"
done
trap "" EXIT
}
PATH=$QTDIR/bin:$PATH
if type qmake-qt5 >/dev/null 2>&1; then
QMAKE=qmake-qt5
else
QMAKE=qmake
fi
if ! type pylupdate5 >/dev/null 2>&1; then
echo "pylupdate5 not found"
exit 1
fi
if type lupdate-qt5 >/dev/null 2>&1; then
LUPDATE=lupdate-qt5
else
LUPDATE=lupdate
fi
exclude="--exclude i18n/qgis_en.ts"
opts="-locations none"
fast=
while (( $# > 0 )); do
arg=$1
shift
if [ "$arg" = "-a" ]; then
arg=$1
shift
if [ -f "i18n/qgis_$arg.ts" ]; then
echo "cannot add existing translation $arg"
exit 1
else
add="$add $arg"
fi
elif [ "$arg" = "-f" ]; then
fast=--remove-files
elif [ -f "i18n/qgis_$arg.ts" ]; then
exclude="$exclude --exclude i18n/qgis_$arg.ts"
else
opts="$opts $arg"
fi
done
trap cleanup EXIT
if [ "$exclude" != "--exclude i18n/qgis_en.ts" -o -n "$add" ]; then
echo Saving excluded translations
tar $fast -cf i18n/qgis_ts.tar i18n/qgis_*.ts $exclude
fi
builddir=$1
if [ ! -d "$builddir" ]; then
echo Build directory not found
exit 1
fi
if [ ! -f "$builddir/src/core/qgsexpression_texts.cpp" ]; then
echo Generated help files not found
exit 1
fi
echo Updating python translations
cd python
pylupdate5 utils.py {console,pyplugin_installer}/*.{py,ui} -ts python-i18n.ts
perl ../scripts/ts2cpp.pl python-i18n.ts python-i18n.cpp
rm python-i18n.ts
cd ..
for i in python/plugins/*/CMakeLists.txt; do
cd ${i%/*}
pylupdate5 $(find . -name "*.py" -o -name "*.ui") -ts python-i18n.ts
perl ../../../scripts/ts2cpp.pl python-i18n.ts python-i18n.cpp
rm python-i18n.ts
cd ../../..
done
echo Updating GRASS module translations
perl scripts/qgm2cpp.pl >src/plugins/grass/grasslabels-i18n.cpp
mv src/plugins/plugin_template/plugingui.cpp src/plugins/plugin_template/plugingui.cpp.save
echo Creating qmake project file
for i in \
src/plugins/plugin_template/plugingui.cpp \
src/plugins/plugin_template/plugin.cpp
do
[ -f "$i" ] && mv "$i" "$i.save"
done
$QMAKE -project -o qgis_ts.pro -nopwd src python i18n "$builddir/src/core/qgsexpression_texts.cpp"
if [ -n "$add" ]; then
for i in $add; do
echo "Adding translation for $i"
echo "TRANSLATIONS += i18n/qgis_$i.ts" >> qgis_ts.pro
done
fi
echo Updating translations
$LUPDATE $opts -verbose qgis_ts.pro
if [ -z "$fast" ]; then
echo Updating TRANSLATORS File
./scripts/tsstat.pl >doc/TRANSLATORS
fi
cleanup
if [ -n "$add" ]; then
for i in $add; do
if [ -f i18n/qgis_$i.ts ]; then
git add i18n/qgis_$i.ts
else
echo "Translation for $i was not added"
exit 1
fi
done
fi
|
t-hey/QGIS-Original
|
scripts/update_ts_files.sh
|
Shell
|
gpl-2.0
| 4,407 |
#! /bin/sh
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# The strictness specified in Makefile.am:AUTOMAKE_OPTIONS should
# override that specified in configure.ac:AM_INIT_AUTOMAKE, and both
# should override the strictness specified on the command line.
# NOTE: the current semantics might not be the best one (even if it has
# been in place for quite a long time); see also Automake bug #7673.
# Update this test if the semantics are changed.
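# For example (illustrative, mirroring one of the combinations exercised below):
# with "AUTOMAKE_OPTIONS = foreign" in Makefile.am and "AM_INIT_AUTOMAKE([gnu])"
# in configure.ac, even "automake --gnu" must succeed without the GNU-mandated
# files such as README.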
. test-init.sh
# We want complete control over automake options.
AUTOMAKE=$am_original_AUTOMAKE
cat > Makefile.am <<'END'
AUTOMAKE_OPTIONS =
END
set_strictness ()
{
set +x
sed <$2 >$2-t -e "s|^\\(AUTOMAKE_OPTIONS\\) *=.*|\\1 = $1|" \
-e "s|^\\(AM_INIT_AUTOMAKE\\).*|\\1([$1])|"
mv -f $2-t $2
set -x
cat $2
}
ok ()
{
$AUTOMAKE -Werror $*
}
ko ()
{
AUTOMAKE_fails $*
grep 'required file.*README' stderr
}
$ACLOCAL
# Leave out only one of the required files, to avoid too much
# repetition in the error messages.
touch INSTALL NEWS AUTHORS ChangeLog COPYING
rm -rf autom4te*.cache
set_strictness '' Makefile.am
set_strictness '' configure.ac
ko --gnu
ko
ok --foreign
rm -rf autom4te*.cache
set_strictness 'gnu' Makefile.am
set_strictness '' configure.ac
ko --gnu
ko
ko --foreign
rm -rf autom4te*.cache
set_strictness '' Makefile.am
set_strictness 'gnu' configure.ac
ko --gnu
ko
ko --foreign
rm -rf autom4te*.cache
set_strictness 'foreign' Makefile.am
set_strictness '' configure.ac
ok --gnu
ok
ok --foreign
rm -rf autom4te*.cache
set_strictness '' Makefile.am
set_strictness 'foreign' configure.ac
ok --gnu
ok
ok --foreign
rm -rf autom4te*.cache
set_strictness 'gnu' Makefile.am
set_strictness 'gnu' configure.ac
ko --gnu
ko
ko --foreign
rm -rf autom4te*.cache
set_strictness 'foreign' Makefile.am
set_strictness 'foreign' configure.ac
ok --gnu
ok
ok --foreign
rm -rf autom4te*.cache
set_strictness 'foreign' Makefile.am
set_strictness 'gnu' configure.ac
ok --gnu
ok
ok --foreign
rm -rf autom4te*.cache
set_strictness 'gnu' Makefile.am
set_strictness 'foreign' configure.ac
ko --gnu
ko
ko --foreign
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/strictness-override.sh
|
Shell
|
gpl-2.0
| 2,727 |
#!/bin/sh
# test for fastasort utility
FASTASORT="../../src/util/fastasort"
FASTALENGTH="../../src/util/fastalength"
INPUTDIR="../data/protein"
INPUTFILE="fastasort.test.fasta"
OUTPUTFILE="fastasort.sorted.test.fasta"
cat $INPUTDIR/*.fasta > $INPUTFILE
$FASTASORT $INPUTFILE --key len > $OUTPUTFILE
PREV_ID="NONE"
PREV_LEN=0
clean_exit(){
rm -f $INPUTFILE $INDEXFILE $OUTPUTFILE
exit $1
}
$FASTALENGTH $OUTPUTFILE | while read LENGTH IDENTIFIER
do
if [ $LENGTH -ge $PREV_LEN ]
then
echo Sorted $IDENTIFIER $LENGTH
else
echo Sorting failure: $PREV_ID $PREV_LEN : $IDENTIFIER $LENGTH
clean_exit 1
fi
PREV_ID=$IDENTIFIER
PREV_LEN=$LENGTH
done
clean_exit 0
|
nathanweeks/exonerate
|
test/util/fastasort.test.sh
|
Shell
|
gpl-3.0
| 718 |
#!/bin/bash
# cleans up logs
find /home/freeciv/freeciv-web/freeciv-web/logs/*.log -exec cp /dev/null {} \;
cp /dev/null /usr/local/nginx/logs/access.log
cp /dev/null /usr/local/nginx/logs/error.log
|
andreasrosdal/freeciv-web
|
scripts/log-cleanup.sh
|
Shell
|
agpl-3.0
| 201 |
#!/bin/bash
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# environment_plist generates a plist file that contains some
# environment variables of the host machine (like DTPlatformBuild
# or BuildMachineOSBuild) given a target platform.
#
# This script only runs on darwin and you must have Xcode installed.
#
# --output - the path to place the output plist file.
# --platform - the target platform, e.g. 'iphoneos' or 'iphonesimulator8.3'
#
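#
# A hypothetical invocation (SDK name and output path are illustrative only):
#   ./environment_plist.sh --platform iphonesimulator --output /tmp/environment.plist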
set -u
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
--platform)
PLATFORM="$2"
shift
;;
--output)
OUTPUT="$2"
shift
;;
*)
# unknown option
;;
esac
shift
done
PLATFORM_DIR=$(/usr/bin/xcrun --sdk "${PLATFORM}" --show-sdk-platform-path)
PLATFORM_PLIST="${PLATFORM_DIR}"/Info.plist
TEMPDIR=$(mktemp -d "${TMPDIR:-/tmp}/bazel_environment.XXXXXX")
PLIST="${TEMPDIR}/env.plist"
trap 'rm -rf "${TEMPDIR}"' ERR EXIT
os_build=$(/usr/bin/sw_vers -buildVersion)
compiler=$(/usr/libexec/PlistBuddy -c "print :DefaultProperties:DEFAULT_COMPILER" "${PLATFORM_PLIST}")
platform_version=$(/usr/bin/xcodebuild -version -sdk "${PLATFORM}" PlatformVersion)
sdk_build=$(/usr/bin/xcodebuild -version -sdk "${PLATFORM}" ProductBuildVersion)
platform_build=$"${sdk_build}"
xcode_build=$(/usr/bin/xcodebuild -version | grep Build | cut -d ' ' -f3)
xcode_version_string=$(/usr/bin/xcodebuild -version | grep Xcode | cut -d ' ' -f2)
xcode_version=$(/usr/bin/printf '%02d%d%d\n' $(echo "${xcode_version_string//./ }"))
/usr/bin/defaults write "${PLIST}" DTPlatformBuild -string ${platform_build:-""}
/usr/bin/defaults write "${PLIST}" DTSDKBuild -string ${sdk_build:-""}
/usr/bin/defaults write "${PLIST}" DTPlatformVersion -string ${platform_version:-""}
/usr/bin/defaults write "${PLIST}" DTXcode -string ${xcode_version:-""}
/usr/bin/defaults write "${PLIST}" DTXcodeBuild -string ${xcode_build:-""}
/usr/bin/defaults write "${PLIST}" DTCompiler -string ${compiler:-""}
/usr/bin/defaults write "${PLIST}" BuildMachineOSBuild -string ${os_build:-""}
cat "${PLIST}" > "${OUTPUT}"
|
UrbanCompass/bazel
|
src/tools/xcode/environment/environment_plist.sh
|
Shell
|
apache-2.0
| 2,600 |
#!/bin/bash
# Exit upon any errors
set -e
# Set defaults
HOST=
PORT=
USER=root
PASSWORD=
DATABASE=deploy
CURRENT_VERSION=
TARGET_VERSION=
DRYRUN=false
usage() {
echo "Usage: upgrade [-H|--host HOST] [-P|--port PORT]"
echo " [-u|--user USERNAME] [-p|--password PASSWORD]"
echo " [-d|--database DATABASE] [-v|--version VERSION]"
echo " [--dry-run] [-h|--help]"
echo "optional arguments:"
echo "-h, --help show this help message and exit"
echo "-H, --host HOST DB server host name, default is localhost"
echo "-P, --port HOST DB server port number, default is 3306"
echo "-u, --user USERNAME DB server user name, default is root"
echo "-p, --password PASSWORD DB server password, default is empty"
echo "-d, --database DATABASE database to use, default is deploy"
echo "-v, --version VERSION the schema version to upgrade to, default is the highest version possible"
echo "--dry-run print upgrade route without action"
}
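# Example invocation (hypothetical host, credentials and version shown only for
# illustration): upgrade the "deploy" schema on a remote server to version 5.
#   ./upgrade.sh -H db.example.com -P 3306 -u deploy_user -p secret -d deploy -v 5 --dry-run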
resolve_target_version() {
# list all files like schema-update-N.sql, and use the largest N as the target version
TARGET_VERSION=0
for f in schema-update-*.sql
do
tmp=${f#schema-update*-}
num=${tmp%.sql}
if (( num > TARGET_VERSION )); then
TARGET_VERSION=$num
fi
done
}
while [[ $# -ge 1 ]]
do
key="$1"
case $key in
-H|--host)
HOST="$2"
shift # past argument
;;
-P|--port)
PORT="$2"
shift # past argument
;;
-u|--user)
USER="$2"
shift # past argument
;;
-p|--password)
PASSWORD="$2"
shift # past argument
;;
-d|--database)
DATABASE="$2"
shift # past argument
;;
-v|--version)
TARGET_VERSION="$2"
shift # past argument
;;
-h|--help)
usage #unknown option
exit 0
;;
--dry-run)
DRYRUN=true
;;
*)
echo "Unknown option $1"
usage #unknown option
exit 1
;;
esac
shift # past argument or value
done
[ ! -z "$HOST" ] && HOST="--host=$HOST"
[ ! -z "$PORT" ] && PORT="--port=$PORT"
[ ! -z "$PASSWORD" ] && PASSWORD="--password=$PASSWORD"
# get the directory of this script
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CURRENT_VERSION=$(mysql $HOST $PORT -u $USER $PASSWORD $DATABASE -s -N < $DIR/check_version.sql)
[ -z "$TARGET_VERSION" ] && resolve_target_version
echo "Will upgrade schema from version ${CURRENT_VERSION} to version ${TARGET_VERSION}"
for ((i=CURRENT_VERSION+1;i<=TARGET_VERSION;i++)); do
echo "upgrading to version ${i} with schema-update-$i.sql..."
if [ "$DRYRUN" == "true" ]; then
continue
fi
mysql $HOST $PORT -u $USER $PASSWORD $DATABASE < $DIR/schema-update-$i.sql
done
echo "Successfully upgraded schema from version ${CURRENT_VERSION} to version ${TARGET_VERSION}"
|
brennentsmith/teletraan
|
tools/mysql/upgrade.sh
|
Shell
|
apache-2.0
| 2,845 |
cp manifest_test.json manifest.json
export BUILD_DIR=~/Downloads/quickbug
rm -rf $BUILD_DIR
rm -rf core
mkdir core
cp -r ../../core/* core
cp ../../lib/bookmarks/bookmark.js core
cp -r . $BUILD_DIR
cp ../../core/foam.css $BUILD_DIR
# For code compression, uncomment the following line:
# ~/node_modules/uglify-js/bin/uglifyjs --overwrite "$BUILD_DIR/foam.js"
# ../../core/ChromeStorage.js \
# ../../demos/benchmark_data.json \
# ../../demos/photo.js \
|
jacksonic/foam
|
apps/quickbug/build_test.sh
|
Shell
|
apache-2.0
| 457 |
#!/bin/bash
# Run the thermo estimator on the given thermo input file
python -m scoop ../../scripts/thermoEstimator.py input_QM.py
|
pierrelb/RMG-Py
|
examples/thermoEstimator/run_QM.sh
|
Shell
|
mit
| 132 |
#!/bin/bash -eu
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
tools/oss-fuzz.sh
|
googlefonts/oss-fuzz
|
projects/systemd/build.sh
|
Shell
|
apache-2.0
| 694 |
#!/bin/sh
PATH=/bin:/sbin:/usr/bin:/usr/sbin
if [ -n "$1" ]; then
jails=$1
else
jails=$(cd /jails ; ls -1)
fi
date=$(date +"%Y%m%d_%H%M%S")
zfs snapshot zroot/usr/ports@${date}
for jail in $jails; do
D=/jails/$jail
zfs destroy zroot$D/usr/ports 2>/dev/null
zfs clone zroot/usr/ports@${date} zroot$D/usr/ports
done
|
chtyim/infrastructure-puppet
|
modules/rootbin_asf/files/bin/fbsd_jail_ports_tree_update.sh
|
Shell
|
apache-2.0
| 329 |
#!/bin/bash
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
cd "$(dirname "$0")/../../.."
mkdir -p cmake/build
cd cmake/build
# MSBUILD_CONFIG's values are suitable for cmake as well
cmake -DgRPC_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" "$@" ../..
|
jtattermusch/grpc
|
tools/run_tests/helper_scripts/pre_build_cmake.sh
|
Shell
|
apache-2.0
| 802 |
#! /bin/sh
system=`uname -s`
case $system in
Darwin | FreeBSD | NetBSD | OpenBSD | DragonFly)
ncpu=`sysctl -n hw.ncpu 2>/dev/null`
;;
SunOS)
ncpu=`psrinfo -p`
;;
Linux)
ncpu=`cat /proc/cpuinfo | grep '^processor' | wc -l`
;;
*)
;;
esac
if [ -z "$ncpu" ]; then
ncpu=1
fi
echo $ncpu
|
cgerum/libcpu
|
CMake/GetCPUCount.sh
|
Shell
|
bsd-2-clause
| 324 |
#! /bin/sh
# Test that gettext() in multithreaded applications works correctly.
# Copyright (C) 2008-2014 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <http://www.gnu.org/licenses/>.
set -e
common_objpfx=$1
test_program_prefix=$2
objpfx=$3
# Create the domain directory.
mkdir -p ${objpfx}domaindir/ja_JP/LC_MESSAGES
# Populate it.
msgfmt -o ${objpfx}domaindir/ja_JP/LC_MESSAGES/tstgettext6.mo ../po/ja.po
${test_program_prefix} ${objpfx}tst-gettext6 > ${objpfx}tst-gettext6.out
exit $?
|
vvavrychuk/glibc
|
intl/tst-gettext6.sh
|
Shell
|
gpl-2.0
| 1,160 |
#! /bin/bash
# A tls test.
# Copyright (C) 2003-2014 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <http://www.gnu.org/licenses/>.
set -e
common_objpfx=$1; shift
test_via_rtld_prefix=$1; shift
test_wrapper_env=$1; shift
run_program_env=$1; shift
logfile=$common_objpfx/nptl/tst-tls6.out
# We have to find libc and nptl
library_path=${common_objpfx}:${common_objpfx}nptl
tst_tls5="${test_via_rtld_prefix} ${common_objpfx}/nptl/tst-tls5"
> $logfile
fail=0
for aligned in a e f; do
echo "preload tst-tls5mod{$aligned,b,c,d}.so" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
${run_program_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{$aligned,b,c,d}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
echo "preload tst-tls5mod{b,$aligned,c,d}.so" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
${run_program_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{b,$aligned,c,d}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
echo "preload tst-tls5mod{b,c,d,$aligned}.so" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
${run_program_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{b,c,d,$aligned}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
done
echo "preload tst-tls5mod{d,a,b,c,e}" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
${run_program_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{d,a,b,c,e}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
echo "preload tst-tls5mod{d,a,b,e,f}" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
${run_program_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{d,a,b,e,f}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
exit $fail
|
infoburp/glibc
|
nptl/tst-tls6.sh
|
Shell
|
gpl-2.0
| 2,638 |
#!/bin/sh
#
# Build a fat binary on Mac OS X, thanks Ryan!
# Number of CPUs (for make -j)
NCPU=`sysctl -n hw.ncpu`
if test x$NJOB = x; then
NJOB=$NCPU
fi
# Generic, cross-platform CFLAGS you always want go here.
CFLAGS="-O3 -g -pipe"
# PowerPC 32-bit configure flags (10.4 runtime compatibility)
# We dynamically load X11, so using the system X11 headers is fine.
CONFIG_PPC="--build=`uname -p`-apple-darwin --host=powerpc-apple-darwin \
--x-includes=/usr/X11R6/include --x-libraries=/usr/X11R6/lib"
# PowerPC 32-bit compiler flags
CC_PPC="gcc-4.0 -arch ppc"
CXX_PPC="g++-4.0 -arch ppc"
CFLAGS_PPC="-mmacosx-version-min=10.4"
CPPFLAGS_PPC="-DMAC_OS_X_VERSION_MIN_REQUIRED=1040 \
-nostdinc \
-F/Developer/SDKs/MacOSX10.4u.sdk/System/Library/Frameworks \
-I/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/powerpc-apple-darwin10/4.0.1/include \
-isystem /Developer/SDKs/MacOSX10.4u.sdk/usr/include"
# PowerPC 32-bit linker flags
LFLAGS_PPC="-arch ppc -Wl,-headerpad_max_install_names -mmacosx-version-min=10.4 \
-F/Developer/SDKs/MacOSX10.4u.sdk/System/Library/Frameworks \
-L/Developer/SDKs/MacOSX10.4u.sdk/usr/lib/gcc/powerpc-apple-darwin10/4.0.1 \
-Wl,-syslibroot,/Developer/SDKs/MacOSX10.4u.sdk"
# PowerPC 64-bit configure flags (10.5 runtime compatibility)
# We dynamically load X11, so using the system X11 headers is fine.
CONFIG_PPC64="--build=`uname -p`-apple-darwin --host=powerpc-apple-darwin \
--x-includes=/usr/X11R6/include --x-libraries=/usr/X11R6/lib"
# PowerPC 64-bit compiler flags
CC_PPC64="gcc-4.0 -arch ppc64"
CXX_PPC64="g++-4.0 -arch ppc64"
CFLAGS_PPC64="-mmacosx-version-min=10.5"
CPPFLAGS_PPC64="-DMAC_OS_X_VERSION_MIN_REQUIRED=1050 \
-nostdinc \
-F/Developer/SDKs/MacOSX10.5.sdk/System/Library/Frameworks \
-I/Developer/SDKs/MacOSX10.5.sdk/usr/lib/gcc/powerpc-apple-darwin10/4.0.1/include \
-isystem /Developer/SDKs/MacOSX10.5.sdk/usr/include"
# PowerPC 64-bit linker flags
LFLAGS_PPC64="-arch ppc64 -Wl,-headerpad_max_install_names -mmacosx-version-min=10.5 \
-F/Developer/SDKs/MacOSX10.5.sdk/System/Library/Frameworks \
-L/Developer/SDKs/MacOSX10.5.sdk/usr/lib/gcc/powerpc-apple-darwin10/4.0.1/ppc64 \
-Wl,-syslibroot,/Developer/SDKs/MacOSX10.5.sdk"
# Intel 32-bit configure flags (10.4 runtime compatibility)
# We dynamically load X11, so using the system X11 headers is fine.
CONFIG_X86="--build=`uname -p`-apple-darwin --host=i386-apple-darwin \
--x-includes=/usr/X11R6/include --x-libraries=/usr/X11R6/lib"
# They changed this to "darwin10" in Xcode 3.2 (Snow Leopard).
GCCUSRPATH_X86="$SDK_PATH/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin9/4.0.1"
if [ ! -d "$GCCUSRPATH_X86" ]; then
GCCUSRPATH_X86="$SDK_PATH/MacOSX10.4u.sdk/usr/lib/gcc/i686-apple-darwin10/4.0.1"
fi
if [ ! -d "$GCCUSRPATH_X86" ]; then
echo "Couldn't find any GCC usr path for x86"
exit 1
fi
# Intel 32-bit compiler flags
CC_X86="gcc-4.0 -arch i386"
CXX_X86="g++-4.0 -arch i386"
CFLAGS_X86="-mmacosx-version-min=10.4"
CPPFLAGS_X86="-DMAC_OS_X_VERSION_MIN_REQUIRED=1040 \
-nostdinc \
-F/Developer/SDKs/MacOSX10.4u.sdk/System/Library/Frameworks \
-I$GCCUSRPATH_X86/include \
-isystem /Developer/SDKs/MacOSX10.4u.sdk/usr/include"
# Intel 32-bit linker flags
LFLAGS_X86="-arch i386 -Wl,-headerpad_max_install_names -mmacosx-version-min=10.4 \
-F/Developer/SDKs/MacOSX10.4u.sdk/System/Library/Frameworks \
-L$GCCUSRPATH_X86 \
-Wl,-syslibroot,/Developer/SDKs/MacOSX10.4u.sdk"
# Intel 64-bit configure flags (10.5 runtime compatibility)
# We dynamically load X11, so using the system X11 headers is fine.
CONFIG_X64="--build=`uname -p`-apple-darwin --host=i386-apple-darwin \
--x-includes=/usr/X11R6/include --x-libraries=/usr/X11R6/lib"
# Intel 64-bit compiler flags
CC_X64="gcc-4.0 -arch x86_64"
CXX_X64="g++-4.0 -arch x86_64"
CFLAGS_X64="-mmacosx-version-min=10.5"
CPPFLAGS_X64="-DMAC_OS_X_VERSION_MIN_REQUIRED=1050 \
-nostdinc \
-F/Developer/SDKs/MacOSX10.5.sdk/System/Library/Frameworks \
-I/Developer/SDKs/MacOSX10.5.sdk/usr/lib/gcc/i686-apple-darwin10/4.0.1/include \
-isystem /Developer/SDKs/MacOSX10.5.sdk/usr/include"
# Intel 64-bit linker flags
LFLAGS_X64="-arch x86_64 -Wl,-headerpad_max_install_names -mmacosx-version-min=10.5 \
-F/Developer/SDKs/MacOSX10.5.sdk/System/Library/Frameworks \
-L/Developer/SDKs/MacOSX10.5.sdk/usr/lib/gcc/i686-apple-darwin10/4.0.1/x86_64 \
-Wl,-syslibroot,/Developer/SDKs/MacOSX10.5.sdk"
#
# Find the configure script
#
srcdir=`dirname $0`/..
auxdir=$srcdir/build-scripts
cd $srcdir
#
# Figure out which phase to build:
# all,
# configure, configure-ppc, configure-ppc64, configure-x86, configure-x64
# make, make-ppc, make-ppc64, make-x86, make-x64, merge
# install
# clean
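#
# For example (illustrative), a full build can also be run in stages:
#   sh build-scripts/fatbuild.sh configure
#   sh build-scripts/fatbuild.sh make
#   sh build-scripts/fatbuild.sh merge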
if test x"$1" = x; then
phase=all
else
phase="$1"
fi
case $phase in
all)
configure_ppc="yes"
configure_ppc64="yes"
configure_x86="yes"
configure_x64="yes"
make_ppc="yes"
make_ppc64="yes"
make_x86="yes"
make_x64="yes"
merge="yes"
;;
configure)
configure_ppc="yes"
configure_ppc64="yes"
configure_x86="yes"
configure_x64="yes"
;;
configure-ppc)
configure_ppc="yes"
;;
configure-ppc64)
configure_ppc64="yes"
;;
configure-x86)
configure_x86="yes"
;;
configure-x64)
configure_x64="yes"
;;
make)
make_ppc="yes"
make_ppc64="yes"
make_x86="yes"
make_x64="yes"
merge="yes"
;;
make-ppc)
make_ppc="yes"
;;
make-ppc64)
make_ppc64="yes"
;;
make-x86)
make_x86="yes"
;;
make-x64)
make_x64="yes"
;;
merge)
merge="yes"
;;
install)
install_bin="yes"
install_hdrs="yes"
install_lib="yes"
install_data="yes"
install_man="yes"
;;
install-bin)
install_bin="yes"
;;
install-hdrs)
install_hdrs="yes"
;;
install-lib)
install_lib="yes"
;;
install-data)
install_data="yes"
;;
install-man)
install_man="yes"
;;
clean)
clean_ppc="yes"
clean_ppc64="yes"
clean_x86="yes"
clean_x64="yes"
;;
clean-ppc)
clean_ppc="yes"
;;
clean-ppc64)
clean_ppc64="yes"
;;
clean-x86)
clean_x86="yes"
;;
clean-x64)
clean_x64="yes"
;;
*)
echo "Usage: $0 [all|configure[-ppc|-ppc64|-x86|-x64]|make[-ppc|-ppc64|-x86|-x64]|merge|install|clean[-ppc|-ppc64|-x86|-x64]]"
exit 1
;;
esac
case `uname -p` in
powerpc)
native_path=ppc
;;
powerpc64)
native_path=ppc64
;;
*86)
native_path=x86
;;
x86_64)
native_path=x64
;;
*)
echo "Couldn't figure out native architecture path"
exit 1
;;
esac
#
# Create the build directories
#
for dir in build build/ppc build/ppc64 build/x86 build/x64; do
if test -d $dir; then
:
else
mkdir $dir || exit 1
fi
done
#
# Build the PowerPC 32-bit binary
#
if test x$configure_ppc = xyes; then
(cd build/ppc && \
sh ../../configure $CONFIG_PPC CC="$CC_PPC" CXX="$CXX_PPC" CFLAGS="$CFLAGS $CFLAGS_PPC" CPPFLAGS="$CPPFLAGS_PPC" LDFLAGS="$LFLAGS_PPC") || exit 2
fi
if test x$make_ppc = xyes; then
(cd build/ppc && ls include && make -j$NJOB) || exit 3
fi
#
# Build the PowerPC 64-bit binary
#
if test x$configure_ppc64 = xyes; then
(cd build/ppc64 && \
sh ../../configure $CONFIG_PPC64 CC="$CC_PPC64" CXX="$CXX_PPC64" CFLAGS="$CFLAGS $CFLAGS_PPC64" CPPFLAGS="$CPPFLAGS_PPC64" LDFLAGS="$LFLAGS_PPC64") || exit 2
fi
if test x$make_ppc64 = xyes; then
(cd build/ppc64 && ls include && make -j$NJOB) || exit 3
fi
#
# Build the Intel 32-bit binary
#
if test x$configure_x86 = xyes; then
(cd build/x86 && \
sh ../../configure $CONFIG_X86 CC="$CC_X86" CXX="$CXX_X86" CFLAGS="$CFLAGS $CFLAGS_X86" CPPFLAGS="$CPPFLAGS_X86" LDFLAGS="$LFLAGS_X86") || exit 2
fi
if test x$make_x86 = xyes; then
(cd build/x86 && make -j$NJOB) || exit 3
fi
#
# Build the Intel 64-bit binary
#
if test x$configure_x64 = xyes; then
(cd build/x64 && \
sh ../../configure $CONFIG_X64 CC="$CC_X64" CXX="$CXX_X64" CFLAGS="$CFLAGS $CFLAGS_X64" CPPFLAGS="$CPPFLAGS_X64" LDFLAGS="$LFLAGS_X64") || exit 2
fi
if test x$make_x64 = xyes; then
(cd build/x64 && make -j$NJOB) || exit 3
fi
#
# Combine into fat binary
#
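# (For reference, "lipo -create" stitches the per-architecture images into one universal
#  binary, roughly: lipo -create -o libSDL.dylib ppc/libSDL.dylib x86/libSDL.dylib ...;
#  the paths shown here are illustrative.)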
if test x$merge = xyes; then
output=.libs
sh $auxdir/mkinstalldirs build/$output
cd build
target=`find . -mindepth 4 -maxdepth 4 -type f -name '*.dylib' | head -1 | sed 's|.*/||'`
(lipo -create -o $output/$target `find . -mindepth 4 -maxdepth 4 -type f -name "*.dylib"` &&
ln -sf $target $output/libSDL.dylib &&
lipo -create -o $output/libSDL.a */build/.libs/libSDL.a &&
cp $native_path/build/.libs/libSDL.la $output &&
cp $native_path/build/.libs/libSDL.lai $output &&
cp $native_path/build/libSDL.la . &&
lipo -create -o libSDLmain.a */build/libSDLmain.a &&
echo "Build complete!" &&
echo "Files can be found in the build directory.") || exit 4
cd ..
fi
#
# Install
#
do_install()
{
echo $*
$* || exit 5
}
if test x$prefix = x; then
prefix=/usr/local
fi
if test x$exec_prefix = x; then
exec_prefix=$prefix
fi
if test x$bindir = x; then
bindir=$exec_prefix/bin
fi
if test x$libdir = x; then
libdir=$exec_prefix/lib
fi
if test x$includedir = x; then
includedir=$prefix/include
fi
if test x$datadir = x; then
datadir=$prefix/share
fi
if test x$mandir = x; then
mandir=$prefix/man
fi
if test x$install_bin = xyes; then
do_install sh $auxdir/mkinstalldirs $bindir
do_install /usr/bin/install -c -m 755 build/$native_path/sdl-config $bindir/sdl-config
fi
if test x$install_hdrs = xyes; then
do_install sh $auxdir/mkinstalldirs $includedir/SDL
for src in $srcdir/include/*.h; do \
file=`echo $src | sed -e 's|^.*/||'`; \
do_install /usr/bin/install -c -m 644 $src $includedir/SDL/$file; \
done
do_install /usr/bin/install -c -m 644 $srcdir/include/SDL_config_macosx.h $includedir/SDL/SDL_config.h
fi
if test x$install_lib = xyes; then
do_install sh $auxdir/mkinstalldirs $libdir
do_install sh build/$native_path/libtool --mode=install /usr/bin/install -c build/libSDL.la $libdir/libSDL.la
do_install /usr/bin/install -c -m 644 build/libSDLmain.a $libdir/libSDLmain.a
do_install ranlib $libdir/libSDLmain.a
fi
if test x$install_data = xyes; then
do_install sh $auxdir/mkinstalldirs $datadir/aclocal
do_install /usr/bin/install -c -m 644 $srcdir/sdl.m4 $datadir/aclocal/sdl.m4
fi
if test x$install_man = xyes; then
do_install sh $auxdir/mkinstalldirs $mandir/man3
for src in $srcdir/docs/man3/*.3; do \
file=`echo $src | sed -e 's|^.*/||'`; \
do_install /usr/bin/install -c -m 644 $src $mandir/man3/$file; \
done
fi
#
# Clean up
#
do_clean()
{
echo $*
$* || exit 6
}
if test x$clean_x86 = xyes; then
do_clean rm -r build/x86
fi
if test x$clean_ppc = xyes; then
do_clean rm -r build/ppc
fi
|
albertz/sdl
|
build-scripts/fatbuild.sh
|
Shell
|
lgpl-2.1
| 11,490 |
#! /bin/sh
echo 'Starting Fault-Tolerant version of ProActive NBody...'
workingDir=`dirname $0`
. ${workingDir}/nbody.sh -displayft "$@"
|
PaulKh/scale-proactive
|
examples/nbody/nbodyFaultTolerance.sh
|
Shell
|
agpl-3.0
| 140 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
git config user.name "minikube-bot"
git config user.email "[email protected]"
REPLACE_PKG_VERSION=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_BUILD}
REPLACE_MINIKUBE_LINUX_SHA256=$(awk '{ print $1 }' out/minikube-linux-amd64.sha256)
REPLACE_MINIKUBE_DARWIN_SHA256=$(awk '{ print $1 }' out/minikube-darwin-amd64.sha256)
REPLACE_CASK_CHECKPOINT=$(curl \
--compressed \
--location \
--user-agent 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36' \
https://github.com/kubernetes/minikube/releases.atom \
| sed 's|<pubDate>[^<]*</pubDate>||g' \
| shasum -a 256 | awk '{ print $1 }')
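# (the pipeline above hashes the GitHub releases Atom feed with the volatile <pubDate>
#  fields stripped; the result replaces $CASK_CHECKPOINT in the cask template below)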
MINIKUBE_ROOT=$PWD
GIT_SSH_COMMAND='ssh -i $ARCH_SSH_KEY' git clone ssh://[email protected]/minikube.git aur-minikube
pushd aur-minikube >/dev/null
sed -e "s/\$PKG_VERSION/${REPLACE_PKG_VERSION}/g" \
-e "s/\$MINIKUBE_LINUX_SHA256/${REPLACE_MINIKUBE_LINUX_SHA256}/g" \
$MINIKUBE_ROOT/installers/linux/archlinux/PKGBUILD > PKGBUILD
sed -e "s/\$PKG_VERSION/${REPLACE_PKG_VERSION}/g" \
-e "s/\$MINIKUBE_LINUX_SHA256/${REPLACE_MINIKUBE_LINUX_SHA256}/g" \
$MINIKUBE_ROOT/installers/linux/archlinux/.SRCINFO > .SRCINFO
git add PKGBUILD .SRCINFO
git commit -m "Upgrade to version ${REPLACE_PKG_VERSION}"
GIT_SSH_COMMAND='ssh -i $ARCH_SSH_KEY' git push origin master
popd >/dev/null
git clone --depth 1 [email protected]:minikube-bot/homebrew-cask.git # don't pull entire history
pushd homebrew-cask >/dev/null
git checkout -b ${REPLACE_PKG_VERSION}
sed -e "s/\$PKG_VERSION/${REPLACE_PKG_VERSION}/g" \
-e "s/\$MINIKUBE_DARWIN_SHA256/${REPLACE_MINIKUBE_DARWIN_SHA256}/g" \
-e "s/\$CASK_CHECKPOINT/${REPLACE_CASK_CHECKPOINT}/g" \
$MINIKUBE_ROOT/installers/darwin/brew-cask/minikube.rb.tmpl > Casks/minikube.rb
git add Casks/minikube.rb
git commit -F- <<EOF
Update minikube to ${REPLACE_PKG_VERSION}
- [x] brew cask audit --download {{cask_file}} is error-free.
- [x] brew cask style --fix {{cask_file}} reports no offenses.
- [x] The commit message includes the cask’s name and version.
EOF
git push origin ${REPLACE_PKG_VERSION}
curl -v -k -u minikube-bot:${BOT_PASSWORD} -X POST https://api.github.com/repos/caskroom/homebrew-cask/pulls \
-d @- <<EOF
{
"title": "Update minikube to ${REPLACE_PKG_VERSION}",
"head": "minikube-bot:${REPLACE_PKG_VERSION}",
"base": "master"
}
EOF
popd >/dev/null
rm -rf aur-minikube homebrew-cask
|
rawlingsj/gofabric8
|
vendor/k8s.io/minikube/hack/jenkins/release_update_installers.sh
|
Shell
|
apache-2.0
| 3,273 |
#! /bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
DMLC_CORE=$(pwd)/../3rdparty/dmlc-core
cd ../3rdparty/tvm/nnvm/amalgamation
make clean
make DMLC_CORE_PATH=$DMLC_CORE nnvm.d
cp nnvm.d ../../../../amalgamation/
echo '#define MSHADOW_FORCE_STREAM
#ifndef MSHADOW_USE_CBLAS
#if (__MIN__ == 1)
#define MSHADOW_USE_CBLAS 0
#else
#define MSHADOW_USE_CBLAS 1
#endif
#endif
#define MSHADOW_USE_CUDA 0
#define MSHADOW_USE_MKL 0
#define MSHADOW_RABIT_PS 0
#define MSHADOW_DIST_PS 0
#define DMLC_LOG_STACK_TRACE 0
#include "mshadow/tensor.h"
#include "mxnet/base.h"
#include "dmlc/json.h"
#include "mxnet/tensor_blob.h"' > temp
cat nnvm.cc >> temp
mv temp ../../../../amalgamation/nnvm.cc
|
eric-haibin-lin/mxnet
|
amalgamation/prep_nnvm.sh
|
Shell
|
apache-2.0
| 1,449 |
#!/bin/bash
if [ -z $1 ]
then
VERSION=UNKNOWN
echo "You must pass a version!"
exit 1
else
VERSION=$1
fi
DIRBASE=lisp-codec
FULLDIR=${DIRBASE}-full
ASDFDIR=${DIRBASE}-asdf
DEVDIR=${DIRBASE}-dev
DISTDIR=dist
rm -Rf ${FULLDIR}
rm -Rf ${ASDFDIR}
rm -Rf ${DEVDIR}
rm -Rf ${DISTDIR}
# Making the development snapshot.
svn export http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Lisp ${DEVDIR}
# Preparing the *asdf* distribution.
mkdir ${ASDFDIR}
cp -R ${DEVDIR}/src/* ${ASDFDIR}
cp -R ${DEVDIR}/examples ${ASDFDIR}
mv ${ASDFDIR}/examples/ ${ASDFDIR}/rl-glue-examples/
mkdir ${ASDFDIR}/doc
cp ${DEVDIR}/doc/manual/lisp-codec.pdf ${ASDFDIR}/doc
cp -R ${DEVDIR}/doc/reference ${ASDFDIR}/doc
# Preparing the *full* distribution.
mkdir ${FULLDIR}
mkdir ${FULLDIR}/asdf
cp -R ${ASDFDIR}/* ${FULLDIR}/asdf
mv ${FULLDIR}/asdf/doc/ ${FULLDIR}/
cp setup.lisp ${FULLDIR}
cvs -f -d:pserver:anonymous:@cclan.cvs.sourceforge.net:/cvsroot/cclan login
cvs -fz3 -d:pserver:[email protected]:/cvsroot/cclan export -D "1 day ago" asdf
cvs -f -d:pserver:[email protected]:/cvsroot/cclan logout
mkdir ${FULLDIR}/asdf
mv asdf/asdf.lisp ${FULLDIR}/asdf/
rm -Rf asdf
SPLIT_SEQUENCE=split-sequence
USOCKET=usocket-0.4.1
wget -P ${FULLDIR}/asdf http://ftp.linux.org.uk/pub/lisp/experimental/cclan/${SPLIT_SEQUENCE}.tar.gz
wget -P ${FULLDIR}/asdf http://common-lisp.net/project/usocket/releases/${USOCKET}.tar.gz
for f in ${SPLIT_SEQUENCE} ${USOCKET}; do
gzip -d ${FULLDIR}/asdf/${f}.tar.gz
tar -xvf ${FULLDIR}/asdf/${f}.tar -C ${FULLDIR}/asdf
rm -f ${FULLDIR}/asdf/${f}.tar
done
# Creating the distributions.
mkdir ${DISTDIR}
DEVFILEBASE=${DEVDIR}-${VERSION}
DEVTAR=${DEVFILEBASE}.tar
tar -cf ${DEVTAR} ${DEVDIR}
gzip ${DEVTAR}
FULLFILEBASE=${FULLDIR}-${VERSION}
FULLTAR=${FULLFILEBASE}.tar
tar -cf ${FULLTAR} ${FULLDIR}
gzip ${FULLTAR}
ASDFFILEBASE=${ASDFDIR}-${VERSION}
ASDFTAR=${ASDFFILEBASE}.tar
tar -cf ${ASDFTAR} ${ASDFDIR}
gzip ${ASDFTAR}
mv *.tar.gz ${DISTDIR}/
rm -Rf ${DEVDIR}
rm -Rf ${FULLDIR}
rm -Rf ${ASDFDIR}
# Uploading the distributions.
python ../googlecode_upload.py -s "RL-Glue Lisp Codec Full $VERSION" -p rl-glue-ext --labels=Type-Installer,OpSys-All,Language-Lisp,Audience-User ${DISTDIR}/${FULLTAR}.gz
python ../googlecode_upload.py -s "RL-Glue Lisp Codec ASDF $VERSION" -p rl-glue-ext --labels=Type-Installer,OpSys-All,Language-Lisp,Audience-User ${DISTDIR}/${ASDFTAR}.gz
python ../googlecode_upload.py -s "RL-Glue Lisp Codec Dev $VERSION" -p rl-glue-ext --labels=Type-Installer,OpSys-All,Language-Lisp,Audience-Dev ${DISTDIR}/${DEVTAR}.gz
# Updating the Wiki.
python substitute-lisp-strings.py $VERSION ${FULLFILEBASE} ${ASDFFILEBASE} ${DEVFILEBASE}
cp Lisp.wiki ../wiki/lisp.new
cd ../wiki
svn up
mv lisp.new Lisp.wiki
svn commit Lisp.wiki -m "Automated update of Lisp wiki page."
|
aksmas/rl-glue-ext
|
projects/distribution_tools/Lisp-Codec/download-lisp.bash
|
Shell
|
apache-2.0
| 2,870 |
#!/usr/bin/zsh
OLDPWD="$(pwd)"
cd $HOME
TESTFOLDER="${HOME}/p9k"
mkdir -p $TESTFOLDER
cd $TESTFOLDER
# Make a deep test folder
mkdir -p deep-folder/1/12/123/1234/12345/123456/1234567/123455678/123456789
# Make a git repo
mkdir git-repo
cd git-repo
git config --global user.email "[email protected]"
git config --global user.name "Testing Tester"
git init
echo "TEST" >> testfile
git add testfile
git commit -m "Initial commit"
cd $TESTFOLDER
# Make a Mercurial repo
mkdir hg-repo
cd hg-repo
export HGUSER="Test bot <[email protected]>"
hg init
echo "TEST" >> testfile
hg add testfile
hg ci -m "Initial commit"
cd $TESTFOLDER
# Setup a SVN folder
svnadmin create ~/.svn-repo
mkdir svn-repo
svn checkout "file://${HOME}/.svn-repo" "svn-repo"
cd svn-repo
echo "TEST" >> testfile
svn add testfile
svn commit -m "Initial commit"
cd $TESTFOLDER
cd $OLDPWD
|
ccollins/dotfiles
|
zsh/oh-my-zsh.symlink/themes/powerlevel9k/test-vm-providers/setup-environment.sh
|
Shell
|
mit
| 861 |
for file in $(ls *.hs); do
content=`cat $file`
echo $file
lines=$(sloccount $file | grep "Total Physical Source")
echo $lines
python count.py $file $lines
#echo "Time = "
#time liquid $file > /dev/null | tail -n1
echo ""
done
|
mightymoose/liquidhaskell
|
benchmarks/esop2013-submission/count.sh
|
Shell
|
bsd-3-clause
| 224 |
#!/bin/sh
# I put a variable in my scripts named PROGNAME which
# holds the name of the program being run. You can get this
# value from the first item on the command line ($0).
PROGNAME=$(basename $0)
error_exit ()
{
# ----------------------------------------------------------------
# Function for exit due to fatal program error
# Accepts 1 argument:
# string containing descriptive error message
# ----------------------------------------------------------------
echo "${PROGNAME}: ${1:-"Unknown Error"}" 1>&2
exit 1
}
cd $1/src || error_exit "$LINENO: Cannot change directory! Aborting"
HASH=`find -type f -print0 | sort -z | xargs -0 sha1sum | sha1sum `
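# HASH is the sha1 of the concatenated per-file sha1 list and looks like "3b18e5...  -"
# (illustrative; sha1sum appends "  -" when reading stdin); it is substituted for the
# CUSTOM_BUILD placeholder in Device.cpp below.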
cd $1 || error_exit "$LINENO: Cannot change directory! Aborting"
mv $1/src/Device.cpp $1/src/Device.cpp.template
sed 's/CUSTOM_BUILD/'"$HASH"'/g' $1/src/Device.cpp.template > $1/src/Device.cpp
rm $1/src/Device.cpp.template
#setup required environment variables if not already set
. /opt/openrov/cockpit/linux/openrov_config.sh
if test "$ROV_BOARD" = "board25"
then
cat > $1/src/BoardConfig.h << __EOF__
#ifndef __BOARDCONFIG_H__
#define __BOARDCONFIG_H__
#define HAS_STD_CAPE (0)
#define HAS_OROV_CONTROLLERBOARD_25 (1)
#endif
__EOF__
fi
if test "$ROV_BOARD" = "cape"
then
cat > $1/src/BoardConfig.h << __EOF__
#ifndef __BOARDCONFIG_H__
#define __BOARDCONFIG_H__
#define HAS_STD_CAPE (1)
#define HAS_OROV_CONTROLLERBOARD_25 (0)
#endif
__EOF__
fi
EXITCODE=`$BUILD_ATMEGA_CODE 1>&2`
if [ $? -eq 0 ]
then
echo $1
exit 0
fi
error_exit "$LINENO: Compile of the Arduino image failed."
|
BrianAdams/openrov-software
|
linux/arduino/firmware-build.sh
|
Shell
|
mit
| 1,586 |
#!/bin/bash
#
# Run this script to publish a new version of the library to npm. This requires
# that you have a clean working directory and have created a tag that matches
# the version number in package.json.
#
set -o errexit
#
# All profiles to be built. Must correspond to .json files in the config
# directory.
#
PROFILES="ol ol-debug"
#
# Destination directory for builds.
#
BUILDS=dist
#
# URL for canonical repo.
#
REMOTE=https://github.com/openlayers/ol3.git
#
# Display usage and exit.
#
display_usage() {
cat <<-EOF
Usage: ${1} <version>
To publish a new release, update the version number in package.json and
create a tag for the release.
The tag name must match the version number prefixed by a "v" (for example,
version 3.2.1 would be tagged v3.2.1).
The tag must be pushed to ${REMOTE} before the release can be published.
EOF
}
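#
# Illustrative release flow (the version number matches the usage example above):
#   git tag v3.2.1 && git push https://github.com/openlayers/ol3.git v3.2.1
#   ./tasks/publish.sh 3.2.1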
#
# Exit if the current working tree is not clean.
#
assert_clean() {
source `git --exec-path`/git-sh-setup && \
require_clean_work_tree "publish" "Please commit or stash them."
}
#
# Exit if the requested version doesn't match package.json.
#
assert_version_match() {
v=`grep -o '"version":.*' package.json | sed 's/"version": *"\(.*\)",/\1/'`
if test "${1}" != "${v}"; then
echo "Version mismatch: requested '${1}', but package.json specifies '${v}'"
exit 1
fi
}
#
# Build all of the distribution profiles.
#
build_js() {
for p in ${@}; do
echo building ${BUILDS}/${p}.js
node ./tasks/build.js config/${p}.json ${BUILDS}/${p}.js
done
}
build_css() {
./node_modules/clean-css/bin/cleancss css/ol.css -o ${BUILDS}/ol.css
cp css/ol.css ${BUILDS}/ol-debug.css
}
#
# Check out the provided tag. This ensures that the tag has been pushed to
# the canonical remote.
#
checkout_tag() {
git fetch ${REMOTE} refs/tags/v${1}:refs/tags/v${1}
git checkout refs/tags/v${1}
}
#
# Build all profiles and publish.
#
main() {
root=$(cd -P -- "$(dirname -- "${0}")" && pwd -P)/..
cd ${root}
assert_clean
checkout_tag ${1}
assert_version_match ${1}
rm -rf ${BUILDS}
build_js ${PROFILES}
build_css
npm publish
}
if test ${#} -ne 1; then
display_usage ${0}
exit 1
else
main ${1}
fi
|
mechdrew/ol3
|
tasks/publish.sh
|
Shell
|
bsd-2-clause
| 2,205 |
#!/bin/sh
. "${TEST_SCRIPTS_DIR}/unit.sh"
define_test "3 nodes, 1 disconnected, add a node"
setup_nodes <<EOF
192.168.20.41
192.168.20.42
192.168.20.43
192.168.20.44
EOF
setup_ctdbd <<EOF
NODEMAP
0 192.168.20.41 0x0 CURRENT RECMASTER
1 192.168.20.42 0x1
2 192.168.20.43 0x0
EOF
required_result 0 <<EOF
WARNING: Node 1 is disconnected. You MUST fix this node manually!
Node 3 is NEW
EOF
simple_test
|
SVoxel/R7800
|
git_home/samba.git/ctdb/tests/tool/ctdb.reloadnodes.021.sh
|
Shell
|
gpl-2.0
| 432 |
#!/bin/bash
# Copyright 2015 Yajie Miao (Carnegie Mellon University)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# Creates a lexicon in which each word is represented by the sequence of its characters (spelling). In theory, we can build such
# a lexicon from the words in the training transcripts. However, for consistent comparison to the phoneme-based system, we use
# the word list from the CMU dictionary. No pronunciations are employed.
# run this from ../
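# For illustration, an input word such as "ABOUT" ends up in the lexicon as the line
# "ABOUT A B O U T" (the word followed by its spelling, one character per unit).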
phndir=data/local/dict_phn
dir=data/local/dict_char
mkdir -p $dir
[ -f path.sh ] && . ./path.sh
# Use the word list of the phoneme-based lexicon. Create the lexicon using characters.
cat $phndir/lexicon2_raw_nosil.txt | awk '{print $1}' | \
perl -e 'while(<>){ chop; $str="$_"; foreach $p (split("", $_)) {$str="$str $p"}; print "$str\n";}' \
> $dir/lexicon2_raw_nosil.txt
# Get the set of lexicon units without noises
cut -d' ' -f2- $dir/lexicon2_raw_nosil.txt | tr ' ' '\n' | sort -u > $dir/units_nosil.txt
# Add special noises words & characters into the lexicon. To be consistent with the blank <blk>,
# we add "< >" to the noises characters
(echo '<SPOKEN_NOISE> <SPOKEN_NOISE>'; echo '<UNK> <UNK>'; echo '<NOISE> <NOISE>'; echo '<SPACE> <SPACE>';) | \
cat - $dir/lexicon2_raw_nosil.txt | sort | uniq > $dir/lexicon.txt || exit 1;
# The complete set of lexicon units, indexed by numbers starting from 1
(echo '<NOISE>'; echo '<SPOKEN_NOISE>'; echo '<SPACE>'; echo '<UNK>'; ) | cat - $dir/units_nosil.txt | awk '{print $1 " " NR}' > $dir/units.txt
# Convert character sequences into the corresponding sequences of units indices, encoded by units.txt
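# (e.g. "ABOUT A B O U T" becomes "ABOUT 5 6 19 25 24" -- the indices shown are
#  illustrative and depend on the ordering produced in units.txt)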
utils/sym2int.pl -f 2- $dir/units.txt < $dir/lexicon.txt > $dir/lexicon_numbers.txt
echo "Character-based dictionary (word spelling) preparation succeeded"
|
psibre/eesen
|
asr_egs/wsj/local/wsj_prepare_char_dict.sh
|
Shell
|
apache-2.0
| 2,355 |
#!/usr/bin/env bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Make sure listed files do not use unwrap() or panic!()
set -o errexit
set -o nounset
set -o pipefail
# cd into repo root to make sure paths work in any case
cd "$(git rev-parse --show-toplevel)"
FILES=("components/compositing/compositor.rs"
"components/constellation/constellation.rs"
"components/constellation/pipeline.rs"
"ports/glutin/lib.rs"
"ports/glutin/window.rs")
# make sure the files exist
ls -1 "${FILES[@]}"
# make sure the files do not contain "unwrap" or "panic!"
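# (grep exits 0 when it finds a match; the leading "!" inverts that, so the script's
#  exit status is non-zero if any listed file contains unwrap( or panic!( )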
! grep --line-number --with-filename "unwrap(\|panic!(" "${FILES[@]}"
|
prampey/servo
|
etc/ci/check_no_panic.sh
|
Shell
|
mpl-2.0
| 804 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This command builds and runs a local kubernetes cluster. It's just like
# local-up.sh, but this one launches the three separate binaries.
# You may need to run this as root to allow kubelet to open docker's socket.
DOCKER_OPTS=${DOCKER_OPTS:-""}
DOCKER_NATIVE=${DOCKER_NATIVE:-""}
DOCKER=(docker ${DOCKER_OPTS})
DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
ALLOW_SECURITY_CONTEXT=${ALLOW_SECURITY_CONTEXT:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
cd "${KUBE_ROOT}"
if [ "$(id -u)" != "0" ]; then
echo "WARNING : This script MAY be run as root for docker socket / iptables functionality; if failures occur, retry as root." 2>&1
fi
# Stop right away if the build fails
set -e
source "${KUBE_ROOT}/hack/lib/init.sh"
function usage {
echo "This script starts a local kube cluster. "
echo "Example 1: hack/local-up-cluster.sh -o _output/dockerized/bin/linux/amd64/ (run from docker output)"
echo "Example 2: hack/local-up-cluster.sh (build a local copy of the source)"
}
### Allow user to supply the source directory.
GO_OUT=""
while getopts "o:" OPTION
do
case $OPTION in
o)
echo "skipping build"
echo "using source $OPTARG"
GO_OUT="$OPTARG"
        if [ "$GO_OUT" == "" ]; then
echo "You provided an invalid value for the build output directory."
exit
fi
;;
?)
usage
exit
;;
esac
done
if [ "x$GO_OUT" == "x" ]; then
"${KUBE_ROOT}/hack/build-go.sh" \
cmd/kube-apiserver \
cmd/kube-controller-manager \
cmd/kube-proxy \
cmd/kubectl \
cmd/kubelet \
plugin/cmd/kube-scheduler
else
echo "skipped the build."
fi
function test_docker {
${DOCKER[@]} ps 2> /dev/null 1> /dev/null
if [ "$?" != "0" ]; then
echo "Failed to successfully run 'docker ps', please verify that docker is installed and \$DOCKER_HOST is set correctly."
exit 1
fi
}
# Shut down anyway if there's an error.
set +e
API_PORT=${API_PORT:-8080}
API_HOST=${API_HOST:-127.0.0.1}
# By default only allow CORS for requests on localhost
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-"/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$"}
KUBELET_PORT=${KUBELET_PORT:-10250}
LOG_LEVEL=${LOG_LEVEL:-3}
CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
RKT_PATH=${RKT_PATH:-""}
RKT_STAGE1_IMAGE=${RKT_STAGE1_IMAGE:-""}
CHAOS_CHANCE=${CHAOS_CHANCE:-0.0}
CPU_CFS_QUOTA=${CPU_CFS_QUOTA:-false}
ENABLE_HOSTPATH_PROVISIONER=${ENABLE_HOSTPATH_PROVISIONER:-"false"}
function test_apiserver_off {
# For the common local scenario, fail fast if server is already running.
# this can happen if you run local-up-cluster.sh twice and kill etcd in between.
curl $API_HOST:$API_PORT
if [ ! $? -eq 0 ]; then
echo "API SERVER port is free, proceeding..."
else
echo "ERROR starting API SERVER, exiting. Some host on $API_HOST is serving already on $API_PORT"
exit 1
fi
}
function detect_binary {
# Detect the OS name/arch so that we can find our binary
case "$(uname -s)" in
Darwin)
host_os=darwin
;;
Linux)
host_os=linux
;;
*)
echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
exit 1
;;
esac
case "$(uname -m)" in
x86_64*)
host_arch=amd64
;;
i?86_64*)
host_arch=amd64
;;
amd64*)
host_arch=amd64
;;
arm*)
host_arch=arm
;;
i?86*)
host_arch=x86
;;
s390x*)
host_arch=s390x
;;
ppc64le*)
host_arch=ppc64le
;;
*)
echo "Unsupported host arch. Must be x86_64, 386, arm, s390x or ppc64le." >&2
exit 1
;;
esac
GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
}
cleanup_dockerized_kubelet()
{
if [[ -e $KUBELET_CIDFILE ]]; then
docker kill $(<$KUBELET_CIDFILE) > /dev/null
rm -f $KUBELET_CIDFILE
fi
}
cleanup()
{
echo "Cleaning up..."
# Check if the API server is still running
[[ -n "${APISERVER_PID-}" ]] && APISERVER_PIDS=$(pgrep -P ${APISERVER_PID} ; ps -o pid= -p ${APISERVER_PID})
[[ -n "${APISERVER_PIDS-}" ]] && sudo kill ${APISERVER_PIDS}
# Check if the controller-manager is still running
[[ -n "${CTLRMGR_PID-}" ]] && CTLRMGR_PIDS=$(pgrep -P ${CTLRMGR_PID} ; ps -o pid= -p ${CTLRMGR_PID})
[[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill ${CTLRMGR_PIDS}
if [[ -n "$DOCKERIZE_KUBELET" ]]; then
cleanup_dockerized_kubelet
else
# Check if the kubelet is still running
[[ -n "${KUBELET_PID-}" ]] && KUBELET_PIDS=$(pgrep -P ${KUBELET_PID} ; ps -o pid= -p ${KUBELET_PID})
[[ -n "${KUBELET_PIDS-}" ]] && sudo kill ${KUBELET_PIDS}
fi
# Check if the proxy is still running
[[ -n "${PROXY_PID-}" ]] && PROXY_PIDS=$(pgrep -P ${PROXY_PID} ; ps -o pid= -p ${PROXY_PID})
[[ -n "${PROXY_PIDS-}" ]] && sudo kill ${PROXY_PIDS}
# Check if the scheduler is still running
[[ -n "${SCHEDULER_PID-}" ]] && SCHEDULER_PIDS=$(pgrep -P ${SCHEDULER_PID} ; ps -o pid= -p ${SCHEDULER_PID})
[[ -n "${SCHEDULER_PIDS-}" ]] && sudo kill ${SCHEDULER_PIDS}
# Check if the etcd is still running
[[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
[[ -n "${ETCD_DIR-}" ]] && kube::etcd::clean_etcd_dir
exit 0
}
function startETCD {
echo "Starting etcd"
kube::etcd::start
}
function set_service_accounts {
SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-false}
SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-"/tmp/kube-serviceaccount.key"}
# Generate ServiceAccount key if needed
if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
fi
}
function start_apiserver {
# Admission Controllers to invoke prior to persisting objects in cluster
if [[ -z "${ALLOW_SECURITY_CONTEXT}" ]]; then
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
else
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
fi
# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
CERT_DIR=/var/run/kubernetes
ROOT_CA_FILE=$CERT_DIR/apiserver.crt
priv_arg=""
if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
priv_arg="--allow-privileged "
fi
runtime_config=""
if [[ -n "${RUNTIME_CONFIG}" ]]; then
runtime_config="--runtime-config=${RUNTIME_CONFIG}"
fi
APISERVER_LOG=/tmp/kube-apiserver.log
sudo -E "${GO_OUT}/kube-apiserver" ${priv_arg} ${runtime_config}\
--v=${LOG_LEVEL} \
--cert-dir="${CERT_DIR}" \
--service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
--service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
--admission-control="${ADMISSION_CONTROL}" \
--insecure-bind-address="${API_HOST}" \
--insecure-port="${API_PORT}" \
--etcd-servers="http://127.0.0.1:4001" \
--service-cluster-ip-range="10.0.0.0/24" \
--cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
APISERVER_PID=$!
# Wait for kube-apiserver to come up before launching the rest of the components.
echo "Waiting for apiserver to come up"
kube::util::wait_for_url "http://${API_HOST}:${API_PORT}/api/v1/pods" "apiserver: " 1 10 || exit 1
}
function start_controller_manager {
CTLRMGR_LOG=/tmp/kube-controller-manager.log
sudo -E "${GO_OUT}/kube-controller-manager" \
--v=${LOG_LEVEL} \
--service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
--root-ca-file="${ROOT_CA_FILE}" \
--enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
--master="${API_HOST}:${API_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
CTLRMGR_PID=$!
}
function start_kubelet {
KUBELET_LOG=/tmp/kubelet.log
mkdir -p /var/lib/kubelet
if [[ -z "${DOCKERIZE_KUBELET}" ]]; then
# On selinux enabled systems, it might
# require to relabel /var/lib/kubelet
if which selinuxenabled &> /dev/null && \
selinuxenabled && \
which chcon > /dev/null ; then
if [[ ! $(ls -Zd /var/lib/kubelet) =~ system_u:object_r:svirt_sandbox_file_t:s0 ]] ; then
echo "Applying SELinux label to /var/lib/kubelet directory."
if ! chcon -R system_u:object_r:svirt_sandbox_file_t:s0 /var/lib/kubelet; then
echo "Failed to apply selinux label to /var/lib/kubelet."
fi
fi
fi
sudo -E "${GO_OUT}/kubelet" ${priv_arg}\
--v=${LOG_LEVEL} \
--chaos-chance="${CHAOS_CHANCE}" \
--container-runtime="${CONTAINER_RUNTIME}" \
--rkt-path="${RKT_PATH}" \
--rkt-stage1-image="${RKT_STAGE1_IMAGE}" \
--hostname-override="127.0.0.1" \
--address="127.0.0.1" \
--api-servers="${API_HOST}:${API_PORT}" \
--cpu-cfs-quota=${CPU_CFS_QUOTA} \
--cluster-dns="127.0.0.1" \
--port="$KUBELET_PORT" >"${KUBELET_LOG}" 2>&1 &
KUBELET_PID=$!
else
# Docker won't run a container with a cidfile (container id file)
# unless that file does not already exist; clean up an existing
# dockerized kubelet that might be running.
cleanup_dockerized_kubelet
docker run \
--volume=/:/rootfs:ro \
--volume=/var/run:/var/run:rw \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw,z \
--net=host \
--privileged=true \
-i \
--cidfile=$KUBELET_CIDFILE \
gcr.io/google_containers/kubelet \
/kubelet --v=3 --containerized ${priv_arg}--chaos-chance="${CHAOS_CHANCE}" --hostname-override="127.0.0.1" --address="127.0.0.1" --api-servers="${API_HOST}:${API_PORT}" --port="$KUBELET_PORT" --resource-container="" &> $KUBELET_LOG &
fi
}
function start_kubeproxy {
PROXY_LOG=/tmp/kube-proxy.log
sudo -E "${GO_OUT}/kube-proxy" \
--v=${LOG_LEVEL} \
--hostname-override="127.0.0.1" \
--master="http://${API_HOST}:${API_PORT}" >"${PROXY_LOG}" 2>&1 &
PROXY_PID=$!
SCHEDULER_LOG=/tmp/kube-scheduler.log
sudo -E "${GO_OUT}/kube-scheduler" \
--v=${LOG_LEVEL} \
--master="http://${API_HOST}:${API_PORT}" >"${SCHEDULER_LOG}" 2>&1 &
SCHEDULER_PID=$!
}
function print_success {
cat <<EOF
Local Kubernetes cluster is running. Press Ctrl-C to shut it down.
Logs:
${APISERVER_LOG}
${CTLRMGR_LOG}
${PROXY_LOG}
${SCHEDULER_LOG}
${KUBELET_LOG}
To start using your cluster, open up another terminal/tab and run:
cluster/kubectl.sh config set-cluster local --server=http://${API_HOST}:${API_PORT} --insecure-skip-tls-verify=true
cluster/kubectl.sh config set-context local --cluster=local
cluster/kubectl.sh config use-context local
cluster/kubectl.sh
EOF
}
test_docker
test_apiserver_off
### IF the user didn't supply an output/ for the build... Then we detect.
if [ "$GO_OUT" == "" ]; then
detect_binary
fi
echo "Detected host and ready to start services. Doing some housekeeping first..."
echo "Using GO_OUT $GO_OUT"
KUBELET_CIDFILE=/tmp/kubelet.cid
trap cleanup EXIT
echo "Starting services now!"
startETCD
set_service_accounts
start_apiserver
start_controller_manager
start_kubelet
start_kubeproxy
print_success
while true; do sleep 1; done
|
Samsung-AG/kubernetes
|
hack/local-up-cluster.sh
|
Shell
|
apache-2.0
| 12,275 |
#!/bin/sh
# Copyright 2008 Lukas Sandström <[email protected]>
#
# AppendPatch - A script to be used together with ExternalEditor
# for Mozilla Thunderbird to properly include patches inline in e-mails.
# ExternalEditor can be downloaded at http://globs.org/articles.php?lng=en&pg=2
CONFFILE=~/.appprc
SEP="-=-=-=-=-=-=-=-=-=# Don't remove this line #=-=-=-=-=-=-=-=-=-"
if [ -e "$CONFFILE" ] ; then
LAST_DIR=$(grep -m 1 "^LAST_DIR=" "${CONFFILE}"|sed -e 's/^LAST_DIR=//')
cd "${LAST_DIR}"
else
cd > /dev/null
fi
PATCH=$(zenity --file-selection)
if [ "$?" != "0" ] ; then
#zenity --error --text "No patchfile given."
exit 1
fi
cd - > /dev/null
SUBJECT=$(sed -n -e '/^Subject: /p' "${PATCH}")
HEADERS=$(sed -e '/^'"${SEP}"'$/,$d' $1)
BODY=$(sed -e "1,/${SEP}/d" $1)
CMT_MSG=$(sed -e '1,/^$/d' -e '/^---$/,$d' "${PATCH}")
DIFF=$(sed -e '1,/^---$/d' "${PATCH}")
CCS=`echo -e "$CMT_MSG\n$HEADERS" | sed -n -e 's/^Cc: \(.*\)$/\1,/gp' \
-e 's/^Signed-off-by: \(.*\)/\1,/gp'`
echo "$SUBJECT" > $1
echo "Cc: $CCS" >> $1
echo "$HEADERS" | sed -e '/^Subject: /d' -e '/^Cc: /d' >> $1
echo "$SEP" >> $1
echo "$CMT_MSG" >> $1
echo "---" >> $1
if [ "x${BODY}x" != "xx" ] ; then
echo >> $1
echo "$BODY" >> $1
echo >> $1
fi
echo "$DIFF" >> $1
LAST_DIR=$(dirname "${PATCH}")
grep -v "^LAST_DIR=" "${CONFFILE}" > "${CONFFILE}_"
echo "LAST_DIR=${LAST_DIR}" >> "${CONFFILE}_"
mv "${CONFFILE}_" "${CONFFILE}"
|
overtherain/scriptfile
|
tool-kit/git-2.1.2/contrib/thunderbird-patch-inline/appp.sh
|
Shell
|
mit
| 1,409 |
#!/bin/bash
#
# This is the top-level script we give to Kokoro as the entry point for
# running the "pull request" project:
#
# This script selects a specific Dockerfile (for building a Docker image) and
# a script to run inside that image. Then we delegate to the general
# build_and_run_docker.sh script.
# Change to repo root
cd $(dirname $0)/../../..
export DOCKERFILE_DIR=kokoro/linux/64-bit
export DOCKER_RUN_SCRIPT=kokoro/linux/pull_request_in_docker.sh
export OUTPUT_DIR=testoutput
export TEST_SET="python"
./kokoro/linux/build_and_run_docker.sh
|
nwjs/chromium.src
|
third_party/protobuf/kokoro/linux/python/build.sh
|
Shell
|
bsd-3-clause
| 557 |
#!/bin/sh
# The user can request that we initialize concierge as
# part of startup by holding down hotkeys 1 and 3 during
# boot.
# The user can request that we remove all existing BUG
# applications and also initialize concierge as part of
# startup by holding down hotkeys 2 and 4 during boot.
# The bugnav driver has a proc file that reports the
# current state of the BUGbase buttons, so we use that
# to figure out whether we need to do anything special
# Not sure how much of this is still accurate for 2.0 (pMEA => openjdk, concierge => felix, new button interface), so it is commented out for now
#INIT_BUTTON_CHECK=$(egrep "M1 1|M2 0|M3 1|M4 0" /proc/bugnav|wc -l)
#RESET_BUTTON_CHECK=$(egrep "M1 0|M2 1|M3 0|M4 1" /proc/bugnav|wc -l)
#RUN_DIR=/usr/share/java
#INIT_ARG=-Dosgi.init=true
#
#CVM_DIR=/usr/lib/jvm/phoneme-advanced-personal-debug
#CVM_PATH=/usr/bin/java
#CVM_PARAMS=
#
#if [ -n "$WANT_DEBUG" ]; then
# echo 'Running debug CVM'
# # example to stop before running any code and wait for debugger client to connect to us on port 5000
# CVM_PARAMS='-Xdebug -Xrunjdwp:transport=dt_socket,server=y,address=5000 '
#elif [ -n "$WANT_PROFILE" ]; then
# # this is an example profile run; change it to '-agentlib:jvmtihprof=help' to see all the options
# CVM_PARAMS="-agentlib:jvmtihprof=heap=all,cpu=samples,file=/tmp/profile.txt -Xbootclasspath/a:$CVM_DIR/lib/java_crw_demo.jar"
#fi
#
#if [ $INIT_BUTTON_CHECK = 4 ]; then
# echo 'init request by user'
# INIT=$INIT_ARG
#elif [ $RESET_BUTTON_CHECK = 4 ]; then
# echo 'reset requested by user'
# INIT=$INIT_ARG
# echo 'removing existing apps'
# /bin/rm -f $RUN_DIR/apps/*.jar
#else
# echo 'normal startup'
# INIT=
#fi
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib/jni
export DISPLAY=:0.0
/usr/bin/java -Xmx64M -Dfelix.config.properties=file:///usr/share/java/conf/config.properties -Dfreetype.font=/usr/share/fonts/ttf/LiberationSans-Regular.ttf -Djava.library.path=/usr/lib/jni -jar /usr/share/java/felix.jar
#$CVM_PATH $CVM_PARAMS -Xmx64M -cp concierge.jar -Dfreetype.font=/usr/share/fonts/ttf/LiberationSans-Regular.ttf -Djava.library.path=/usr/lib/jni -Dxargs=$RUN_DIR/init.xargs $INIT ch.ethz.iks.concierge.framework.Framework
|
xifengchuo/openembedded
|
recipes/felix/felix-init/start.sh
|
Shell
|
mit
| 2,212 |
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 8010125
# @summary keytool -importkeystore could create a pkcs12 keystore with
# different storepass and keypass
#
if [ "${TESTJAVA}" = "" ] ; then
JAVAC_CMD=`which javac`
TESTJAVA=`dirname $JAVAC_CMD`/..
fi
# set platform-dependent variables
OS=`uname -s`
case "$OS" in
Windows_* )
FS="\\"
;;
* )
FS="/"
;;
esac
LANG=C
KT=$TESTJAVA${FS}bin${FS}keytool
# Part 1: JKS keystore with same storepass and keypass
rm jks 2> /dev/null
$KT -genkeypair -keystore jks -storetype jks -alias me -dname CN=Me \
-keyalg rsa -storepass pass1111 -keypass pass1111 || exit 11
# Cannot only change storepass
rm p12 2> /dev/null
$KT -importkeystore -noprompt \
-srcstoretype jks -srckeystore jks -destkeystore p12 -deststoretype pkcs12 \
-srcstorepass pass1111 \
-deststorepass pass2222 \
&& exit 12
# You can keep storepass unchanged
rm p12 2> /dev/null
$KT -importkeystore -noprompt \
-srcstoretype jks -srckeystore jks -destkeystore p12 -deststoretype pkcs12 \
-srcstorepass pass1111 \
-deststorepass pass1111 \
|| exit 13
$KT -certreq -storetype pkcs12 -keystore p12 -alias me \
-storepass pass1111 -keypass pass1111 || exit 14
# Or change storepass and keypass both
rm p12 2> /dev/null
$KT -importkeystore -noprompt \
-srcstoretype jks -srckeystore jks -destkeystore p12 -deststoretype pkcs12 \
-srcstorepass pass1111 \
-deststorepass pass2222 -destkeypass pass2222 \
|| exit 15
$KT -certreq -storetype pkcs12 -keystore p12 -alias me \
-storepass pass2222 -keypass pass2222 || exit 16
# Part 2: JKS keystore with different storepass and keypass
# Must import by alias (-srckeypass is not available when importing all)
rm jks 2> /dev/null
$KT -genkeypair -keystore jks -storetype jks -alias me -dname CN=Me \
-keyalg rsa -storepass pass1111 -keypass pass2222 || exit 21
# Can use old keypass as new storepass so new storepass and keypass are same
rm p12 2> /dev/null
$KT -importkeystore -noprompt -srcalias me \
-srcstoretype jks -srckeystore jks -destkeystore p12 -deststoretype pkcs12 \
-srcstorepass pass1111 -srckeypass pass2222 \
-deststorepass pass2222 \
|| exit 22
$KT -certreq -storetype pkcs12 -keystore p12 -alias me \
-storepass pass2222 -keypass pass2222 || exit 23
# Or specify both storepass and keypass to brand new ones
rm p12 2> /dev/null
$KT -importkeystore -noprompt -srcalias me \
-srcstoretype jks -srckeystore jks -destkeystore p12 -deststoretype pkcs12 \
-srcstorepass pass1111 -srckeypass pass2222 \
-deststorepass pass3333 -destkeypass pass3333 \
|| exit 24
$KT -certreq -storetype pkcs12 -keystore p12 -alias me \
-storepass pass3333 -keypass pass3333 || exit 25
# Anyway you cannot make new storepass and keypass different
rm p12 2> /dev/null
$KT -importkeystore -noprompt -srcalias me \
-srcstoretype jks -srckeystore jks -destkeystore p12 -deststoretype pkcs12 \
-srcstorepass pass1111 -srckeypass pass2222 \
-deststorepass pass1111 \
&& exit 26
exit 0
|
stain/jdk8u
|
test/sun/security/tools/keytool/p12importks.sh
|
Shell
|
gpl-2.0
| 4,061 |
#!/bin/sh
what=$1
CXX=g++
prefix="rvalgrind --tool=callgrind --callgrind-out-file=callgrind.out.$1.%p --dump-instr=yes"
. ${ROOTSYS}/bin/thisroot.sh
set -x
function run {
echo "Data Class: $1" | tee $1.1e6.$$.speedlog | tee $1.1e8.$$.speedlog
${CXX} -o speedtest speedtest.cxx `root-config --cflags --libs` -O3 "-DSTATCLASSES=$1"
# ${prefix} ./speedtest 1e6 > $1.1e6.$$.log
# rvalgrind --tool=callgrind --callgrind-out-file=callgrind.out.$1.%p --dump-instr=yes ./speedtest 1e6 > $1.1e6.$$.vallog
./speedtest 1e6 $what >> $1.1e6.$$.speedlog
./speedtest 1e8 $what >> $1.1e8.$$.speedlog
}
run THistStatContent
run "THistStatContent,THistStatUncertainty"
run "THistStatContent,THistStatUncertainty,THistStatTotalSumOfWeights"
run "THistStatContent,THistStatUncertainty,THistStatTotalSumOfWeights,THistStatTotalSumOfSquaredWeights"
run "THistStatContent,THistStatUncertainty,THistStatTotalSumOfWeights,THistStatTotalSumOfSquaredWeights,THistDataMomentUncert"
echo 'Not running with THistDataRuntime'
# run THistDataRuntime
|
lgiommi/root
|
hist/hist/v7/test/runall.sh
|
Shell
|
lgpl-2.1
| 1,043 |
#!/bin/sh
# Package
PACKAGE="gateone"
DNAME="GateOne"
# Others
INSTALL_DIR="/usr/local/${PACKAGE}"
PYTHON_DIR="/usr/local/python"
PATH="${INSTALL_DIR}/bin:${INSTALL_DIR}/env/bin:${PYTHON_DIR}/bin:${PATH}"
PYTHON="${INSTALL_DIR}/env/bin/python"
GATEONE="${INSTALL_DIR}/env/bin/gateone"
SETTINGS_DIR="${INSTALL_DIR}/var/conf.d"
PID_FILE="${INSTALL_DIR}/var/gateone.pid"
USER="gateone"
start_daemon ()
{
# Copy certificate
cp /usr/syno/etc/ssl/ssl.crt/server.crt /usr/syno/etc/ssl/ssl.key/server.key ${INSTALL_DIR}/ssl/
chown ${USER} ${INSTALL_DIR}/ssl/*
su ${USER} -c "PATH=${PATH} nohup ${PYTHON} ${GATEONE} --settings_dir=${SETTINGS_DIR} > ${INSTALL_DIR}/var/gateone_startup.log &"
}
stop_daemon ()
{
su ${USER} -c "PATH=${PATH} ${PYTHON} ${GATEONE} --kill --settings_dir=${SETTINGS_DIR}"
wait_for_status 1 20 || kill -9 `cat ${PID_FILE}`
rm -f ${PID_FILE}
}
daemon_status ()
{
if [ -f ${PID_FILE} ] && kill -0 `cat ${PID_FILE}` > /dev/null 2>&1; then
return
fi
rm -f ${PID_FILE}
return 1
}
wait_for_status ()
{
counter=$2
while [ ${counter} -gt 0 ]; do
daemon_status
[ $? -eq $1 ] && return
let counter=counter-1
sleep 1
done
return 1
}
case $1 in
start)
if daemon_status; then
echo ${DNAME} is already running
else
echo Starting ${DNAME} ...
start_daemon
fi
;;
stop)
if daemon_status; then
echo Stopping ${DNAME} ...
stop_daemon
else
echo ${DNAME} is not running
fi
;;
status)
if daemon_status; then
echo ${DNAME} is running
exit 0
else
echo ${DNAME} is not running
exit 1
fi
;;
log)
echo ${LOG_FILE}
;;
*)
exit 1
;;
esac
|
mirweb/spksrc
|
spk/gateone/src/dsm-control.sh
|
Shell
|
bsd-3-clause
| 1,902 |
#!/bin/sh
test_description='git apply with weird postimage filenames'
. ./test-lib.sh
test_expect_success 'setup' '
vector=$TEST_DIRECTORY/t4135 &&
test_tick &&
git commit --allow-empty -m preimage &&
git tag preimage &&
reset_preimage() {
git checkout -f preimage^0 &&
git read-tree -u --reset HEAD &&
git update-index --refresh
} &&
test_when_finished "rm -f \"tab embedded.txt\"" &&
test_when_finished "rm -f '\''\"quoteembedded\".txt'\''" &&
if touch -- "tab embedded.txt" '\''"quoteembedded".txt'\''
then
test_set_prereq FUNNYNAMES
fi
'
try_filename() {
desc=$1
postimage=$2
prereq=${3:-}
exp1=${4:-success}
exp2=${5:-success}
exp3=${6:-success}
test_expect_$exp1 $prereq "$desc, git-style file creation patch" "
echo postimage >expected &&
reset_preimage &&
rm -f '$postimage' &&
git apply -v \"\$vector\"/'git-$desc.diff' &&
test_cmp expected '$postimage'
"
test_expect_$exp2 $prereq "$desc, traditional patch" "
echo postimage >expected &&
reset_preimage &&
echo preimage >'$postimage' &&
git apply -v \"\$vector\"/'diff-$desc.diff' &&
test_cmp expected '$postimage'
"
test_expect_$exp3 $prereq "$desc, traditional file creation patch" "
echo postimage >expected &&
reset_preimage &&
rm -f '$postimage' &&
git apply -v \"\$vector\"/'add-$desc.diff' &&
test_cmp expected '$postimage'
"
}
try_filename 'plain' 'postimage.txt'
try_filename 'with spaces' 'post image.txt'
try_filename 'with tab' 'post image.txt' FUNNYNAMES
try_filename 'with backslash' 'post\image.txt' BSLASHPSPEC
try_filename 'with quote' '"postimage".txt' FUNNYNAMES success failure success
test_expect_success 'whitespace-damaged traditional patch' '
echo postimage >expected &&
reset_preimage &&
rm -f postimage.txt &&
git apply -v "$vector/damaged.diff" &&
test_cmp expected postimage.txt
'
test_expect_success 'traditional patch with colon in timezone' '
echo postimage >expected &&
reset_preimage &&
rm -f "post image.txt" &&
git apply "$vector/funny-tz.diff" &&
test_cmp expected "post image.txt"
'
test_expect_success 'traditional, whitespace-damaged, colon in timezone' '
echo postimage >expected &&
reset_preimage &&
rm -f "post image.txt" &&
git apply "$vector/damaged-tz.diff" &&
test_cmp expected "post image.txt"
'
test_done
|
overtherain/scriptfile
|
tool-kit/git-2.1.2/t/t4135-apply-weird-filenames.sh
|
Shell
|
mit
| 2,336 |
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCKER_OPTS=${1:-""}
DOCKER_CONFIG=/opt/kubernetes/cfg/docker
cat <<EOF >$DOCKER_CONFIG
DOCKER_OPTS="-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -s overlay --selinux-enabled=false ${DOCKER_OPTS}"
EOF
cat <<EOF >/usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target flannel.service
Requires=flannel.service
[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
EnvironmentFile=-/opt/kubernetes/cfg/docker
WorkingDirectory=/opt/kubernetes/bin
ExecStart=/opt/kubernetes/bin/dockerd \$DOCKER_OPT_BIP \$DOCKER_OPT_MTU \$DOCKER_OPTS
LimitNOFILE=1048576
LimitNPROC=1048576
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
|
childsb/origin
|
vendor/k8s.io/kubernetes/cluster/centos/node/scripts/docker.sh
|
Shell
|
apache-2.0
| 1,410 |
#!/bin/sh
# -- check for root --
if [ "`id -u`" != "0" ]; then
echo "extract_dists.sh: sorry, this must be done as root." 1>&2
exit 1
fi
# -- tear down --
umount -F /chroots/chroot-fbsd-build-ports-head-armv7/etc/fstab -a || exit 1
if [ "$(uname -p)" != "armv7" ]; then
binmiscctl remove armelf || exit 1
fi
|
guyyur/configs
|
chroot-fbsd-build-ports-head-armv7/FreeBSD/tear_down_chroot.sh
|
Shell
|
isc
| 318 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
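  # (lipo -info prints e.g. "Architectures in the fat file: MyLib are: armv7 arm64" --
  #  output shown is illustrative; the rev|cut|rev pipeline keeps only the list after the final colon)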
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-AutoLayoutLint_Example/AutoLayoutLint.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-AutoLayoutLint_Example/AutoLayoutLint.framework"
fi
|
ypresto/AutoLayoutLint
|
Example/Pods/Target Support Files/Pods-AutoLayoutLint_Example/Pods-AutoLayoutLint_Example-frameworks.sh
|
Shell
|
mit
| 3,576 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
else
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries
local basename
basename="$(basename "$1" | sed -E s/\\..+// && exit ${PIPESTATUS[0]})"
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/${basename}.framework/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework 'Pods-DemoTests/Nimble.framework'
install_framework 'Pods-DemoTests/OHHTTPStubs.framework'
install_framework 'Pods-DemoTests/Quick.framework'
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework 'Pods-DemoTests/Nimble.framework'
install_framework 'Pods-DemoTests/OHHTTPStubs.framework'
install_framework 'Pods-DemoTests/Quick.framework'
fi
|
tuyenbq/Moya
|
Demo/Pods/Target Support Files/Pods-DemoTests/Pods-DemoTests-frameworks.sh
|
Shell
|
mit
| 2,814 |
DEPENDS="python python-colorama python-sh python-termcolor fortune-mod"
|
mpardalos/dotfiles
|
motd/install.sh
|
Shell
|
mit
| 72 |
#!/bin/sh
cd /Users/yannis/railsapps/biology14_staging/current;
/Users/yannis/.rbenv/shims/god;
/Users/yannis/.rbenv/shims/god load config/unicorn_staging.god && /Users/yannis/.rbenv/shims/god start biology14_staging_unicorn;
|
yannis/biologyconf
|
config/start_god_staging.sh
|
Shell
|
mit
| 226 |
#!/bin/bash
clear
# or use wm array -- add any that need to be recognized
wms=( 2bwm 2wm 9wm aewm afterstep ahwm alopex amiwm antiwm awesome blackbox bspwm catwm clfswm ctwm cwm dminiwm dragonflywm dwm echinus \
euclid-wm evilpoison evilwm fluxbox flwm fvwm-crystal gnome goomwwm hcwm herbstluftwm i3 icewm jwm karmen larswm lwm matwm2 mcwm monsterwm \
musca notion nwm olwm openbox oroborus pekwm ratpoison sapphire sawfish sscrotwm sithwm smallwm snapwm spectrwm stumpwm subtle tfwm tinywm tritium twm \
uwm vtwm w9wm weewm wind windowlab wm2 wmaker wmfs wmii wmx xfwm4 xmonad xoat yeahwm )
# define colors for color-echo
red="\e[31m"
grn="\e[32m"
ylw="\e[33m"
cyn="\e[36m"
blu="\e[34m"
prp="\e[35m"
bprp="\e[35;1m"
rst="\e[0m"
color-echo()
{ # print with colors
echo -e " $red$1: $rst$2"
}
print-kernel()
{
color-echo 'KL' "$(uname -smr)"
}
print-uptime()
{
up=$(</proc/uptime)
up=${up//.*} # string before first . is seconds
days=$((${up}/86400)) # seconds divided by 86400 is days
hours=$((${up}/3600%24)) # seconds divided by 3600 mod 24 is hours
mins=$((${up}/60%60)) # seconds divided by 60 mod 60 is mins
color-echo "UP" $days'd '$hours'h '$mins'm'
}
print-shell() {
color-echo 'SH' "$(echo $SHELL)"
}
print-cpu()
{
arm=$(grep ARM /proc/cpuinfo) # ARM procinfo uses different format
if [[ "$arm" != "" ]]; then
cpu=$(grep -m1 -i 'Processor' /proc/cpuinfo)
else
cpu=$(grep -m1 -i 'model name' /proc/cpuinfo)
fi
color-echo 'CP' "${cpu#*: }" # everything after colon is processor name
}
print-gpu()
{
gpu=$(lspci | grep VGA | awk -F ': ' '{print $2}' | sed 's/(rev ..)//g')
color-echo 'GP' "$gpu"
}
print-packages()
{
packages=$(dpkg --get-selections | grep -v "deinstall" | wc -l)
color-echo 'PKG' "$packages"
}
print-wm()
{
for wm in ${wms[@]}; do # pgrep through wmname array
pid=$(pgrep -x -u $USER $wm) # if found, this wmname has running process
if [[ "$pid" ]]; then
color-echo 'WM' $wm
break
fi
done
}
print-distro()
{
[[ -e /etc/os-release ]] && source /etc/os-release
if [[ -n "$PRETTY_NAME" ]]; then
color-echo 'OS' "$PRETTY_NAME"
else
color-echo 'OS' "not found"
fi
}
echo
echo -en " █▀▀▀▀▀▀▀▀█ | " && print-distro
echo -en " █ █ | " && print-kernel
echo -en " █ █ █ █ | " && print-cpu
echo -en " █ █ | " && print-shell
echo -en " ▀█▄▄▄▄▄▄█▀ | " && print-uptime
echo
echo
#print-colors
|
nesmeck/DotFiles
|
syswh.sh
|
Shell
|
mit
| 2,546 |
FILE=$1
FOLDER=`dirname "${FILE}"`
FOLDER=${FOLDER##"./"}
export FOLDER
export FILE
#echo "RUNNING WITH ${FILE} FROM FOLDER ${FOLDER}"
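# The perl one-liner scans every non-comment row, tracks the smallest start and largest
# end coordinate, and prints a "==> file <==" header plus one tab-separated summary line,
# e.g. (values illustrative): chr1  1200  98000  none  my_folder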
cat "${FILE}" | grep -v "#" | perl -ane 'BEGIN {$folder = $ENV{"FOLDER"}; $file = $ENV{"FILE"}; $chrom; $max = -1; $min = 1_000_000_000; print "==> $file <==\n"; } END { print "$chrom\t$min\t$max\tnone\t$folder\n\n" } if ( $F[2] < $F[3] ) { $chrom = $F[0]; $min = $F[2] if ( $F[2] < $min ); $max = $F[3] if $F[3] > $max; }'
|
sauloal/perlscripts
|
Bio/progs_parsers/gaggles/report/getRegio.sh
|
Shell
|
mit
| 463 |
curl -XDELETE 'http://localhost:9200/throwtable'
curl http://localhost:9200/throwtable -X PUT -d '
{
"mappings": {
"implementation": {
"properties": {
"algorithm": {
"type": "string",
"index": "not_analyzed"
}
}
}
}
}'
elasticdump \
--input=version4.3/elasticsearch_algorithm_v4.3.json \
--output=http://localhost:9200/throwtable \
--type=data
elasticdump \
--input=version4.3/elasticsearch_category_v4.3.json\
--output=http://localhost:9200/throwtable \
--type=data
elasticdump \
--input=version4.3/elasticsearch_implementation_v4.3.json \
--output=http://localhost:9200/throwtable \
--type=data
elasticdump \
--input=elasticsearch_implementation_npm_inv_1.json \
--output=http://localhost:9200/throwtable \
--type=data
|
xkxx/algodb
|
elasticsearch_dumps/restore_elasticsearch.sh
|
Shell
|
mit
| 810 |
#!/bin/bash
set -e
mkdir -p /home/osgeo4a
IFS=' ' read -ra arches_array <<< "${ARCHES}"
for ARCH in "${arches_array[@]}"
do
export ARCH=$ARCH
/usr/src/distribute.sh -m qgis
mv /usr/src/build/stage/$ARCH /home/osgeo4a
done
rm -rf /usr/src
|
opengisch/OSGeo4A
|
scripts/build_arches.sh
|
Shell
|
mit
| 243 |
## Images
cp -r _assets/_images assets/img/
## Files
cp -r _assets/_files assets/files/
|
lchski/lucascherkewski.com
|
copy-assets.sh
|
Shell
|
mit
| 89 |
#!/bin/sh
wget http://www.cs.columbia.edu/~blei/lda-c/ap.tgz
tar xzf ap.tgz
|
luispedro/BuildingMachineLearningSystemsWithPython
|
ch04/data/download_ap.sh
|
Shell
|
mit
| 76 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-127-1
#
# Security announcement date: 2015-01-03 00:00:00 UTC
# Script generation date: 2017-01-01 21:08:51 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - pyyaml:3.09-5+deb6u1
#
# Last versions recommended by the security team:
# - pyyaml:3.09-5+deb6u1
#
# CVE List:
# - CVE-2014-9130
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade pyyaml=3.09-5+deb6u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/i386/2015/DLA-127-1.sh
|
Shell
|
mit
| 606 |
#!/usr/bin/env bash
PACKAGE_NAME="chirpstack-gateway-bridge"
PACKAGE_VERSION=$1
REV="r1"
PACKAGE_URL="https://artifacts.chirpstack.io/downloads/chirpstack-gateway-bridge/chirpstack-gateway-bridge_${PACKAGE_VERSION}_linux_mips.tar.gz"
DIR=`dirname $0`
PACKAGE_DIR="${DIR}/package"
# Cleanup
rm -rf $PACKAGE_DIR
# CONTROL
mkdir -p $PACKAGE_DIR/CONTROL
cat > $PACKAGE_DIR/CONTROL/control << EOF
Package: $PACKAGE_NAME
Version: $PACKAGE_VERSION-$REV
Architecture: mips_24kc
Maintainer: Orne Brocaar <[email protected]>
Priority: optional
Section: network
Source: N/A
Description: ChirpStack Gateway Bridge
EOF
cat > $PACKAGE_DIR/CONTROL/postinst << EOF
#!/bin/sh
/etc/init.d/chirpstack-gateway-bridge enable
EOF
chmod 755 $PACKAGE_DIR/CONTROL/postinst
cat > $PACKAGE_DIR/CONTROL/conffiles << EOF
/etc/$PACKAGE_NAME/$PACKAGE_NAME.toml
EOF
# Files
mkdir -p $PACKAGE_DIR/opt/$PACKAGE_NAME
mkdir -p $PACKAGE_DIR/etc/$PACKAGE_NAME
mkdir -p $PACKAGE_DIR/etc/init.d
cp files/$PACKAGE_NAME.toml $PACKAGE_DIR/etc/$PACKAGE_NAME/$PACKAGE_NAME.toml
cp files/$PACKAGE_NAME.init $PACKAGE_DIR/etc/init.d/$PACKAGE_NAME
wget -P $PACKAGE_DIR/opt/$PACKAGE_NAME $PACKAGE_URL
tar zxf $PACKAGE_DIR/opt/$PACKAGE_NAME/*.tar.gz -C $PACKAGE_DIR/opt/$PACKAGE_NAME
rm $PACKAGE_DIR/opt/$PACKAGE_NAME/*.tar.gz
# Package
opkg-build -c -o root -g root $PACKAGE_DIR
# Cleanup
rm -rf $PACKAGE_DIR
|
brocaar/lora-semtech-bridge
|
packaging/vendor/dragino/LG308/package.sh
|
Shell
|
mit
| 1,369 |
#!/bin/bash
# make && make tests &&
./_rel/cardgames_release/bin/cardgames_release console
|
go717franciswang/cardgames
|
run_server.sh
|
Shell
|
mit
| 93 |
#!/bin/bash
### gulp initialization
echo 'init gulp'
npm install gulp --save-dev
npm install gulp-uglify --save-dev
npm install gulp-plumber --save-dev
npm install gulp-less --save-dev
npm install gulp-minify-css --save-dev
npm install gulp-rename --save-dev
npm install gulp-util --save-dev
echo 'gulp modules installed'
|
JackPu/albums
|
projects/gulp.sh
|
Shell
|
mit
| 318 |
#!/usr/bin/env bash
cat << EOF > /var/www/public/index.php
<?php
echo 'Hello World !';
EOF
|
phpmauritiusug/2016-DevConMru
|
PHP-01-1-JumpStart/doRunInVagrantSSH.sh
|
Shell
|
mit
| 91 |
#!/bin/bash
set -e
create_pid_dir() {
mkdir -p /run/apt-cacher-ng
chmod -R 0755 /run/apt-cacher-ng
chown ${APT_CACHER_NG_USER}:${APT_CACHER_NG_USER} /run/apt-cacher-ng
}
create_cache_dir() {
mkdir -p ${APT_CACHER_NG_CACHE_DIR}
chmod -R 0755 ${APT_CACHER_NG_CACHE_DIR}
chown -R ${APT_CACHER_NG_USER}:root ${APT_CACHER_NG_CACHE_DIR}
}
create_log_dir() {
mkdir -p ${APT_CACHER_NG_LOG_DIR}
chmod -R 0755 ${APT_CACHER_NG_LOG_DIR}
chown -R ${APT_CACHER_NG_USER}:${APT_CACHER_NG_USER} ${APT_CACHER_NG_LOG_DIR}
}
create_pid_dir
create_cache_dir
create_log_dir
# allow arguments to be passed to apt-cacher-ng
if [[ ${1:0:1} = '-' ]]; then
EXTRA_ARGS="$@"
set --
elif [[ ${1} == apt-cacher-ng || ${1} == $(command -v apt-cacher-ng) ]]; then
EXTRA_ARGS="${@:2}"
set --
fi
# default behaviour is to launch apt-cacher-ng
if [[ -z ${1} ]]; then
exec start-stop-daemon --start --chuid ${APT_CACHER_NG_USER}:${APT_CACHER_NG_USER} \
--exec "$(command -v apt-cacher-ng)" -- -c /etc/apt-cacher-ng ${EXTRA_ARGS}
else
exec "$@"
fi
|
sameersbn/docker-apt-cacher-ng
|
entrypoint.sh
|
Shell
|
mit
| 1,050 |
#!/usr/bin/env sh
#
# Format the whole source tree with stylish-haskell. Skip generated output.
set -e
cd "$(dirname $0)/.."
stylish-haskell -i $(find lib examples cmd tests scripts -name '*.hs' | grep -v /gen/)
|
zenhack/haskell-capnp
|
scripts/format.sh
|
Shell
|
mit
| 212 |
#!/bin/bash
#PBS -l nodes=1:ppn=2,mem=16gb
module load bwa
module load nanopolish
module load samtools
set -x
GENOME=$1
FASTA=$2
TMP_DIR=$3
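# Guess the input format from the filename: extensions ending in "a" (fa/fasta) are FASTA, ones ending in "q" (fq/fastq) are FASTQ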
if [ $(echo $FASTA | grep -c -e "a$") -gt 0 ]; then
FMT="fasta"
elif [ $(echo $FASTA | grep -c -e "q$") -gt 0 ]; then
FMT="fastq"
else
echo "ERROR: $FASTA format not recognised"
exit 1
fi
INPUT_FASTA=$TMP_DIR/$(basename $FASTA).${PBS_ARRAYID}.$FMT
if [ ! -f ${INPUT_FASTA}.fa.gz ]; then
cp ${FASTA}.fa.gz ${INPUT_FASTA}.fa.gz
cp ${FASTA}.fa.gz.fai ${INPUT_FASTA}.fa.gz.fai
cp ${FASTA}.fa.gz.gzi ${INPUT_FASTA}.fa.gz.gzi
cp ${FASTA}.fa.gz.readdb ${INPUT_FASTA}.fa.gz.readdb
fi
cd ~/tmp
if [ ! -f ${INPUT_FASTA}.sorted.bam ]; then
bwa mem -x ont2d $GENOME $INPUT_FASTA | samtools sort -T ${INPUT_FASTA}.tmp -o ${INPUT_FASTA}.sorted.bam
fi
if [ ! -f ${INPUT_FASTA}.sorted.bam.bai ]; then
samtools index ${INPUT_FASTA}.sorted.bam || echo "ERROR: samtools index failed"
fi
nanopolish call-methylation -r $INPUT_FASTA -b ${INPUT_FASTA}.sorted.bam -g $GENOME > ${INPUT_FASTA}.methylation.tsv || echo "ERROR: nanopolish failed"
echo "COMPLETE"
|
scottgigante/nanopore-scripts
|
nanopolish_methylation.sh
|
Shell
|
mit
| 1,100 |
#!/bin/bash
sudo -- sh -c 'mysqldump -uroot -proot -S /var/run/mysqld/mysqld.sock --all-databases > /var/database_backup/db_backup.sql'
duply pi_backup backup
|
Kevin-De-Koninck/TerraPi
|
Webserver/BACKUP_DUPPLY_CRONJOB_SCRIPT.sh
|
Shell
|
mit
| 159 |
#!/usr/bin/env bash
set -e
echo "remove static libs"
rm -fv "$DESTDIR"/usr/lib/lib{com_err,e2p,ext2fs,ss}.a
echo "info dir"
gunzip -v "$DESTDIR"/usr/share/info/libext2fs.info.gz
# update in destdir - need to update in final dir when extracted
install-info --dir-file="$DESTDIR"/usr/share/info/dir /usr/share/info/libext2fs.info
|
pretorh/lfs-notes
|
scripts/6/main/e2fsprogs-post.sh
|
Shell
|
mit
| 330 |
#!/bin/bash
# _ _ _ _
# __ _ ___ | |_| |__ | | ___| |_ _ _
# / _` |/ _ \| __| '_ \| |/ _ \ __| | | |
#| (_| | (_) | |_| |_) | | __/ |_| |_| |
# \__, |\___/ \__|_.__/|_|\___|\__|\__,_|
# |___/
# https://www.youtube.com/user/gotbletu
# https://twitter.com/gotbletu
# https://plus.google.com/+gotbletu
# https://github.com/gotbletu
# [email protected]
# info: rofi-locate is a script to search local files and folders on your computer using the locate command and the updatedb database
# requirements: rofi mlocate
# playlist: rofi https://www.youtube.com/playlist?list=PLqv94xWU9zZ0LVP1SEFQsLEYjZC_SUB3m
xdg-open "$(locate home | rofi -threads 0 -width 100 -dmenu -i -p "locate:")"
|
brendan-R/dotfiles
|
local-share/rofi/locate.sh
|
Shell
|
mit
| 746 |
# Setup container sshd
[ -d /var/run/sshd ] || mkdir -p /var/run/sshd
DEST=/etc/ssh/sshd_config
# Deny root login via ssh
sed -i "/^PermitRootLogin.*/c PermitRootLogin no" $DEST
# Deny password login
sed -i "/^PasswordAuthentication *yes/c PasswordAuthentication no" $DEST
# Allow supervisor logs
sed -i "/^StrictModes yes/c StrictModes no" $DEST
# Change sshd port if it !=22
if [[ "$SSHD_PORT" != "22" ]] ; then
sed -i "/Port 22/c Port $SSHD_PORT" $DEST
fi
|
LeKovr/consup
|
Dockerfiles/gogs/init.d/sshd.sh
|
Shell
|
mit
| 469 |
#!/bin/bash
zip -r draft-pages * -x node_modules/\*
|
mediabeef/a2-draft-pages
|
zip.sh
|
Shell
|
mit
| 53 |
#!/usr/bin/sh
###################################################
# Count each ID's number of logins using the LOGIN marker in the logs,
# and output two groups: login count 0-5 and greater than 5
###################################################
# the second test log
input=data/*.log
output=login_count_2.txt
cat $input | grep LOGIN | awk '{count[$5]++}END{for(name in count)print name,count[name]}' | sort -k2 -nr > $output
cat $output | awk '{
if ( $2 + 0 > 5 ) {
print $0;
}
}' | sort -k1 > $output-gt5
cat $output | awk '{
if ( $2 + 0 <= 5 ) {
print $0;
}
}' | sort -k1 > $output-let5
# the third test log
input=data/*.txt
output=login_count_3.txt
cat $input | grep LOGIN | awk '{count[$5]++}END{for(name in count)print name,count[name]}' | sort -k2 -nr > $output
cat $output | awk '{
if ( $2 + 0 > 5 ) {
print $0;
}
}' | sort -k1 > $output-gt5
cat $output | awk '{
if ( $2 + 0 <= 5 ) {
print $0;
}
}' | sort -k1 > $output-let5
|
jiangdapeng/netease
|
task/Basketball/count_login_times.sh
|
Shell
|
mit
| 959 |
#!/bin/bash
export HOME=/home/matt
/home/matt/.nvm/versions/node/v9.3.0/bin/node /home/matt/.nvm/versions/node/v9.3.0/bin/pm2 $1 $2
|
artemisbot/OcelBot
|
monit.sh
|
Shell
|
mit
| 133 |
#!/bin/sh
# wait for PSQL server to start
sleep 10
# prepare init migration
su -m myuser -c "python manage.py makemigrations"
# migrate db, so we have the latest db schema
su -m myuser -c "python manage.py migrate"
# start development server on public ip interface, on port 8000
su -m myuser -c "python manage.py runserver 0.0.0.0:8000"
|
thomasperrot/MTGTrader
|
docker/resources/bin/run_web.sh
|
Shell
|
mit
| 338 |
time python gibbs.py > data.tab
|
mathurshikhar/HPC
|
python.sh
|
Shell
|
mit
| 32 |
#!/bin/bash
#if [ -f "$(brew --prefix)/etc/bash_completion.d/git-completion.bash" ] && ! shopt -oq posix; then
# . "$(brew --prefix)/etc/bash_completion.d/git-completion.bash"
#fi
#function_exists() {
# declare -f -F $1 > /dev/null
# return $?
#}
#git config --get-regexp alias | awk -F'[ .]' '{print $2 }'
#for al in `__git_aliases`; do
# alias g$al="git $al"
#
# complete_func=_git_$(__git_aliased_command $al)
# function_exists $complete_fnc && __git_complete g$al $complete_func
#done
BASH_COMPLETION_DIR="${HOME}/bin/shell/completions"
BASH_COMPLETION_COMPAT_DIR="${HOME}/bin/shell/completions"
if [ -f "$(brew --prefix)/etc/bash_completion" ] && ! shopt -oq posix; then
source "$(brew --prefix)/etc/bash_completion"
fi
#Verify we have installed dependencies
if hash kubectl 2>/dev/null; then
source <(kubectl completion bash)
fi
|
StevenACoffman/dotfiles
|
bin/shell/bash_completion.sh
|
Shell
|
mit
| 866 |
USERNAME=$USER
BUILD_DIR=/home/$USERNAME/Source/
EXT_DIR=/var/www/static/extjs
sudo chmod +x ./build.sh
echo "Installing Ruby..."
sudo apt-get install ruby ruby1.9.1
echo "Downloading ExtJS library..."
sudo mkdir -p $EXT_DIR
sudo chown $USERNAME $EXT_DIR
(cd $EXT_DIR && wget http://cdn.sencha.com/ext/gpl/ext-4.2.1-gpl.zip)
(cd $EXT_DIR && unzip ext-4.2.1-gpl.zip)
mv $EXT_DIR/ext.4.2.1.883 $EXT_DIR/4.2.1
echo "Installing Sencha SDK..."
cd $BUILD_DIR
sudo wget http://cdn.sencha.io/sdk-tools/SenchaSDKTools-2.0.0-beta3-linux-x64.run
sudo chmod +x SenchaSDKTools-2.0.0-beta3-linux-x64.run
sudo ./SenchaSDKTools-2.0.0-beta3-linux-x64.run
# Graphical installer should start up... Install to /opt/ by default
# After the Sencha SDK Tools installer is finished...
export PATH=/opt/SenchaSDKTools-2.0.0-beta3:$PATH
export SENCHA_SDK_TOOLS_2_0_0_BETA3="/opt/SenchaSDKTools-2.0.0-beta3"
echo "Installing Sencha Cmd..."
cd $BUILD_DIR
wget http://cdn.sencha.com/cmd/4.0.5.87/SenchaCmd-4.0.5.87-linux-x64.run.zip
unzip SenchaCmd-4.0.5.87-linux-x64.run.zip
sudo chmod +x SenchaCmd-4.0.5.87-linux-x64.run
# This shouldn't have to be run as root; if it does, you'll need to chown and chgrp the bin/ folder created in your home directory
./SenchaCmd-4.0.5.87-linux-x64.run
# Graphical installer again...
# After the Sencha Cmd installer is finished...
export PATH=/home/$USERNAME/bin/Sencha/Cmd/4.0.5.87:$PATH
export SENCHA_CMD_4_0_5="/home/$USERNAME/bin/Sencha/Cmd/4.0.5.87"
|
arthur-e/flux-client
|
setup.sh
|
Shell
|
mit
| 1,468 |
#!/usr/bin/env bash
# Removes *.log and *.dif files.
find . -name '*.log' -exec rm -f {} \;
find . -name '*.dif' -exec rm -f {} \;
|
Dzenly/tia
|
rm-logs.sh
|
Shell
|
mit
| 130 |
#!/bin/bash
HOME=$PWD
cd ../..
#Update the release version of the project
#version string will look similar to this: 0.1.0-DEV-27-060aec7
VERSION=$(head -1 ./version.info)
#do work on the version to get the correct info
#we need the version string from above to look like this: 0.1.1-DEV
IFS='.' read -a arr <<< "$VERSION"
#results in [0,1,0-DEV-27-060aec7]
IFS='-' read -a arr2 <<< "${arr[2]}"
#results in [0,DEV,27,060aec7]
let patch=${arr2[0]}+1
#echo $patch
VERSION="${arr[0]}.${arr[1]}.$patch-${arr2[1]}"
echo $VERSION
#update the POM
mvn versions:set -DnewVersion=$VERSION
cd $HOME
. createBuildRelease.sh
echo $VERSION
#commit the new patch version
git commit -a -m "Creating patch version $VERSION"
#tag the build
git tag -a v$VERSION -m "Patch Release Version $VERSION"
#push the build and tag
git push --follow-tags
|
warricksothr/ImageTools
|
build/linux/createPatchRelease.sh
|
Shell
|
mit
| 833 |
# Copyright (C) 2013 Wojciech Matyjewicz
#
# This file is distributed under the terms of the MIT License.
# See LICENSE file for details.
# Simple wrapper script
if [ $# -ne 1 ]
then
echo "Syntax: compile.sh <input>"
exit 1
fi
base=`basename $1 .pas`
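# Compile the Pascal source to LLVM bitcode with rascal, lower it to native assembly with llc, then link against the C runtime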
./rascal $1 && llc $base.bc && gcc -g $base.s runtime.c
|
wmatyjewicz/rascal
|
compile.sh
|
Shell
|
mit
| 319 |
#!/usr/bin/env bash
ok=0
test_assert_fail()
{
if "$@" 2>/dev/null; then
echo "Error: assertion failure in ${test_name}."
exit 1
fi
ok=$(($ok+1))
}
test_assert_equal()
{
if ! diff -u $1 $2; then
echo "Error: assertion failure in ${test_name}."
exit 1
fi
ok=$(($ok+1))
}
declare_rename_test()
{
test_name="Rename::${1}"
test_input="qa/data/${2}"
test_output="qa/data/${2}.new-rename"
test_expected="qa/data/${2}.expected"
}
declare_rename_test "testFieldDecl" "rename-field-decl.cxx"
clang-rename -offset 27 -new-name=m_nX $test_input -- >${test_input}.new-rename
test_assert_equal $test_expected $test_output
## Do the same as previously, but trigger the csv parser this time.
#declare_rename_test "testFieldDeclCsv" "rename-field-decl.cxx"
#bin/rename -csv=qa/data/rename-field-decl.csv $test_input --
#test_assert_equal $test_expected $test_output
#
## Test that we fail on non-existing -csv parameter.
#declare_rename_test "testFieldDeclCsvFail" "rename-field-decl.cxx"
#test_assert_fail bin/rename -csv=qa/data/rename-field-decl.cvs $test_input --
#
## Test that the first column can't be empty.
#declare_rename_test "testFieldDeclCsvFailCol1Empty" "rename-field-decl.cxx"
#test_assert_fail bin/rename -csv=qa/data/rename-field-decl.csv-emptycol1 $test_input --
#
## Test that the second column can't be empty.
#declare_rename_test "testFieldDeclCsvFailCol2Empty" "rename-field-decl.cxx"
#test_assert_fail bin/rename -csv=qa/data/rename-field-decl.csv-emptycol2 $test_input --
#
## Test that rename fails without options.
#declare_rename_test "testFieldDeclCsvFailNoopt" "rename-field-decl.cxx"
#test_assert_fail bin/rename qa/data/rename-field-decl.cxx --
#
## Test that rename dump creates no output
#declare_rename_test "testFieldDeclDump" "rename-field-decl.cxx"
#rm -f qa/data/rename-field-decl.cxx.new-rename
#bin/rename -csv=qa/data/rename-field-decl.csv -dump qa/data/rename-field-decl.cxx -- 2>/dev/null
#test_assert_fail test -f qa/data/rename-field-decl.cxx.new-rename
#
declare_rename_test "testVarDecl" "rename-var-decl.cxx"
clang-rename -offset 40 -new-name m_aS $test_input -- >${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testVarDeclClass" "rename-var-decl-class.cxx"
clang-rename -offset 6 -new-name D $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testCXXConstructorDecl" "rename-cxx-constructor-decl.cxx"
clang-rename -offset 49 -new-name m_nX $test_input -- > tmp.cxx
clang-rename -offset 61 -new-name m_aA tmp.cxx -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
rm -f tmp.cxx
#
declare_rename_test "testCXXConstructorDeclClass" "rename-cxx-constructor-decl-class.cxx"
clang-rename -offset 6 -new-name D $test_input -- >${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testMemberExpr" "rename-member-expr.cxx"
clang-rename -offset 26 -new-name=m_nX $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testDeclRefExpr" "rename-decl-ref-expr.cxx"
clang-rename -offset 39 -new-name=m_aS $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testCXXMethodDecl" "rename-cxx-method-decl.cxx"
clang-rename -offset 27 -new-name=bar $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testCXXConstructExpr" "rename-cxx-constructor-expr.cxx"
clang-rename -offset 6 -new-name=D $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testCXXStaticCastExpr" "rename-cxx-static-cast-expr.cxx"
clang-rename -offset 6 -new-name=D $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testCXXDynamicCastExpr" "rename-cxx-dynamic-cast-expr.cxx"
clang-rename -offset 6 -new-name=D $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testCXXReinterpretCastExpr" "rename-cxx-reinterpret-cast-expr.cxx"
clang-rename -offset 6 -new-name=D $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testCXXConstCastExpr" "rename-cxx-const-cast-expr.cxx"
clang-rename -offset 6 -new-name=D $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
#
declare_rename_test "testCXXDestructorDecl" "rename-cxx-destructor-decl.cxx"
clang-rename -offset 6 -new-name=D $test_input -- > ${test_input}.new-rename
test_assert_equal $test_expected $test_output
echo "OK ($ok)"
# vi:set shiftwidth=4 expandtab:
|
vmiklos/vmexam
|
llvm/qa/test-rename.sh
|
Shell
|
mit
| 4,792 |
#!/bin/sh
# This script depends on the GNU script makeself.sh found at: http://megastep.org/makeself/
# Note: The structure of this package depends on -rpath,./lib being set at compile/link time.
version="1.6.3"
arch=`uname -i`
if [ "${arch}" = "x86_64" ]; then
arch="64bit"
QtLIBPATH="${HOME}/Qt/5.4/gcc_64"
else
arch="32bit"
QtLIBPATH="${HOME}/Qt/5.4/gcc"
fi
if [ -f renminbi-qt ] && [ -f renminbi.conf ] && [ -f README ]; then
echo "Building Renminbi_${version}_${arch}.run ...\n"
if [ -d Renminbi_${version}_${arch} ]; then
rm -fr Renminbi_${version}_${arch}/
fi
mkdir Renminbi_${version}_${arch}
mkdir Renminbi_${version}_${arch}/libs
mkdir Renminbi_${version}_${arch}/platforms
mkdir Renminbi_${version}_${arch}/imageformats
cp renminbi-qt Renminbi_${version}_${arch}/
cp renminbi.conf Renminbi_${version}_${arch}/
cp README Renminbi_${version}_${arch}/
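# Bundle the shared libraries the binary links against (found via ldd) into the package's libs/ directory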
ldd renminbi-qt | grep libssl | awk '{ printf("%s\0", $3); }' | xargs -0 -I{} cp {} Renminbi_${version}_${arch}/libs/
ldd renminbi-qt | grep libdb_cxx | awk '{ printf("%s\0", $3); }' | xargs -0 -I{} cp {} Renminbi_${version}_${arch}/libs/
ldd renminbi-qt | grep libboost_system | awk '{ printf("%s\0", $3); }' | xargs -0 -I{} cp {} Renminbi_${version}_${arch}/libs/
ldd renminbi-qt | grep libboost_filesystem | awk '{ printf("%s\0", $3); }' | xargs -0 -I{} cp {} Renminbi_${version}_${arch}/libs/
ldd renminbi-qt | grep libboost_program_options | awk '{ printf("%s\0", $3); }' | xargs -0 -I{} cp {} Renminbi_${version}_${arch}/libs/
ldd renminbi-qt | grep libboost_thread | awk '{ printf("%s\0", $3); }' | xargs -0 -I{} cp {} Renminbi_${version}_${arch}/libs/
ldd renminbi-qt | grep libminiupnpc | awk '{ printf("%s\0", $3); }' | xargs -0 -I{} cp {} Renminbi_${version}_${arch}/libs/
ldd renminbi-qt | grep libqrencode | awk '{ printf("%s\0", $3); }' | xargs -0 -I{} cp {} Renminbi_${version}_${arch}/libs/
cp ${QtLIBPATH}/lib/libQt*.so.5 Renminbi_${version}_${arch}/libs/
cp ${QtLIBPATH}/lib/libicu*.so.53 Renminbi_${version}_${arch}/libs/
cp ${QtLIBPATH}/plugins/platforms/lib*.so Renminbi_${version}_${arch}/platforms/
cp ${QtLIBPATH}/plugins/imageformats/lib*.so Renminbi_${version}_${arch}/imageformats/
strip Renminbi_${version}_${arch}/renminbi-qt
echo "Enter your sudo password to change the ownership of the archive: "
sudo chown -R nobody:nogroup Renminbi_${version}_${arch}
# now build the archive
if [ -f Renminbi_${version}_${arch}.run ]; then
rm -f Renminbi_${version}_${arch}.run
fi
makeself.sh --notemp Renminbi_${version}_${arch} Renminbi_${version}_${arch}.run "\nCopyright (c) 2014-2015 The Renminbi Developers\nRenminbi will start when the installation is complete...\n" ./renminbi-qt \&
sudo rm -fr Renminbi_${version}_${arch}/
echo "Package created in: $PWD/Renminbi_${version}_${arch}.run\n"
else
echo "Error: Missing files!\n"
echo "Copy this file to a setup folder along with renminbi-qt, renminbi.conf and README.\n"
fi
|
benzhi888/renminbi
|
release/linux_setup_builder.sh
|
Shell
|
mit
| 3,080 |
set -e
ant
mkdir -p build/sample_classes
javac -cp ./build/jar/EditDistanceJoiner.jar -sourcepath sample -d build/sample_classes sample/edu/tsinghua/dbgroup/sample/EditDistanceJoinerTest.java
for thread in `seq 8 1 8`
do
time java -cp ./build/jar/EditDistanceJoiner.jar:./build/sample_classes edu.tsinghua.dbgroup.sample.EditDistanceJoinerTest 2 testdata/join.big.in testdata/join.big.out $thread
if diff testdata/join.big.std testdata/join.big.out > testdata/join.big.diff
then
echo 'EditDistanceJoiner Test Succeed'
echo '==============================='
else
echo 'EditDistanceJoiner Test Fail'
exit -1
fi
done
|
lispc/EditDistanceClusterer
|
test_join.sh
|
Shell
|
mit
| 666 |
#!/usr/bin/env bash
# Convenience script to build Infer when using opam
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
set -e
set -o pipefail
set -u
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
INFER_ROOT="$SCRIPT_DIR"
DEPENDENCIES_DIR="$INFER_ROOT/facebook/dependencies"
PLATFORM="$(uname)"
SANDCASTLE=${SANDCASTLE:-}
NCPU="$(getconf _NPROCESSORS_ONLN 2>/dev/null || echo 1)"
INFER_OPAM_DEFAULT_SWITCH="4.12.0+flambda"
INFER_OPAM_DEFAULT_SWITCH_OPTIONS="--package=ocaml-variants.4.12.0+options,ocaml-option-flambda"
INFER_OPAM_SWITCH=${INFER_OPAM_SWITCH:-$INFER_OPAM_DEFAULT_SWITCH}
INFER_OPAM_SWITCH_OPTIONS=${INFER_OPAM_SWITCH_OPTIONS:-$INFER_OPAM_DEFAULT_SWITCH_OPTIONS}
PLUGIN_DIR="$INFER_ROOT/facebook-clang-plugins"
PLUGIN_SETUP_SCRIPT=${PLUGIN_SETUP_SCRIPT:-setup.sh}
PLUGIN_SETUP="${PLUGIN_DIR}/clang/${PLUGIN_SETUP_SCRIPT}"
function usage() {
echo "Usage: $0 [-y] [targets]"
echo
echo " targets:"
echo " all build everything (default)"
echo " clang build C and Objective-C analyzer"
echo " erlang build Erlang analyzer"
echo " java build Java analyzer"
echo
echo " options:"
echo " -h,--help show this message"
echo " --no-opam-lock do not use the opam/infer.opam.locked file and let opam resolve dependencies"
echo " --only-setup-opam initialize opam, install the opam dependencies of infer, and exit"
echo " --user-opam-switch use the current opam switch to install infer (default: $INFER_OPAM_DEFAULT_SWITCH)"
echo " -y,--yes automatically agree to everything"
echo
echo " examples:"
echo " $0 # build Java, Erlang and C/Objective-C analyzers"
echo " $0 java erlang clang # equivalent way of doing the above"
echo " $0 java # build only the Java analyzer"
}
# arguments
BUILD_CLANG=${BUILD_CLANG:-no}
BUILD_ERLANG=${BUILD_ERLANG:-no}
BUILD_JAVA=${BUILD_JAVA:-no}
INFER_CONFIGURE_OPTS=${INFER_CONFIGURE_OPTS:-""}
INTERACTIVE=${INTERACTIVE:-yes}
JOBS=${JOBS:-$NCPU}
ONLY_SETUP_OPAM=${ONLY_SETUP_OPAM:-no}
USE_OPAM_LOCK=${USE_OPAM_LOCK:-yes}
USER_OPAM_SWITCH=no
ORIG_ARGS="$*"
function build_all() {
BUILD_CLANG=yes
BUILD_ERLANG=yes
BUILD_JAVA=yes
}
while [[ $# -gt 0 ]]; do
opt_key="$1"
case $opt_key in
all)
build_all
shift
continue
;;
clang)
BUILD_CLANG=yes
shift
continue
;;
erlang)
BUILD_ERLANG=yes
shift
continue
;;
java)
BUILD_JAVA=yes
shift
continue
;;
-h|--help)
usage
exit 0
;;
--no-opam-lock)
USE_OPAM_LOCK=no
shift
continue
;;
--user-opam-switch)
USER_OPAM_SWITCH=yes
shift
continue
;;
--only-setup-opam)
ONLY_SETUP_OPAM=yes
shift
continue
;;
-y|--yes)
INTERACTIVE=no
shift
continue
;;
*)
usage
exit 1
esac
shift
done
if [ "$BUILD_CLANG" == "no" ] && [ "$BUILD_ERLANG" == "no" ] && [ "$BUILD_JAVA" == "no" ]; then
build_all
fi
# enable --yes option for some commands in non-interactive mode
YES=
if [ "$INTERACTIVE" == "no" ]; then
YES=--yes
fi
# --yes by default for opam commands except if we are using the user's opam switch
if [ "$INTERACTIVE" == "no" ] || [ "$USER_OPAM_SWITCH" == "no" ]; then
export OPAMYES=true
fi
setup_opam () {
opam var root 1>/dev/null 2>/dev/null || opam init --reinit --bare --no-setup &&
opam_retry opam_switch_create_if_needed "$INFER_OPAM_SWITCH" "$INFER_OPAM_SWITCH_OPTIONS" &&
opam switch set "$INFER_OPAM_SWITCH"
}
install_opam_deps () {
local locked=
if [ "$USE_OPAM_LOCK" == yes ]; then
locked=.locked
fi
opam install --deps-only "$INFER_ROOT"/opam/infer.opam$locked &&
if [ -n "$SANDCASTLE" ]; then
opam pin list | grep yojson || opam pin add yojson "${DEPENDENCIES_DIR}/yojson-1.7.0fix"
fi
}
echo "initializing opam... " >&2
. "$INFER_ROOT"/scripts/opam_utils.sh
if [ "$USER_OPAM_SWITCH" == "no" ]; then
setup_opam
fi
eval $(SHELL=bash opam env)
echo >&2
echo "installing infer dependencies; this can take up to 30 minutes... " >&2
opam_retry install_opam_deps
if [ "$ONLY_SETUP_OPAM" == "yes" ]; then
exit 0
fi
echo "preparing build... " >&2
./autogen.sh > /dev/null
if [ "$BUILD_CLANG" == "no" ]; then
INFER_CONFIGURE_OPTS+=" --disable-c-analyzers"
fi
if [ "$BUILD_ERLANG" == "no" ]; then
INFER_CONFIGURE_OPTS+=" --disable-erlang-analyzers"
fi
if [ "$BUILD_JAVA" == "no" ]; then
INFER_CONFIGURE_OPTS+=" --disable-java-analyzers"
fi
./configure $INFER_CONFIGURE_OPTS
if [ "$BUILD_CLANG" == "yes" ]; then
if ! "$PLUGIN_SETUP" --only-check-install; then
echo ""
echo " Warning: you are not using a release of Infer. The C and"
echo " Objective-C analyses require a custom clang to be compiled"
echo " now. This step takes ~30-60 minutes, possibly more."
echo ""
echo " To speed this along, you are encouraged to use a release of"
echo " Infer instead:"
echo ""
echo " http://fbinfer.com/docs/getting-started"
echo ""
echo " If you are only interested in analyzing Java programs, simply"
echo " run this script with only the \"java\" argument:"
echo ""
echo " $0 java"
echo ""
confirm="n"
printf "Are you sure you want to compile clang? (y/N) "
if [ "$INTERACTIVE" == "no" ]; then
confirm="y"
echo "$confirm"
else
read confirm
fi
if [ "x$confirm" != "xy" ]; then
exit 0
fi
# only run this script if we are definitely building clang
facebook-clang-plugins/clang/src/prepare_clang_src.sh
fi
fi
make -j "$JOBS" opt || (
echo >&2
echo ' compilation failure; you can try running' >&2
echo >&2
echo ' make clean' >&2
echo " '$0' $ORIG_ARGS" >&2
echo >&2
exit 1)
echo
echo "*** Success! Infer is now built in '$SCRIPT_DIR/infer/bin/'."
echo '*** Install infer on your system with `make install`.'
echo
echo '*** If you plan to hack on infer, check out CONTRIBUTING.md to setup your dev environment.'
|
jvillard/infer
|
build-infer.sh
|
Shell
|
mit
| 6,283 |
apt-get remove libsodium-dev
apt-get install php-pear php-bcmath php-memcached php-curl memcached php7.2-dev php-gmp php7.2-mbstring -y
cd lib
git clone https://github.com/jedisct1/libsodium --branch stable
cd libsodium
./configure
make && make check
make install
pecl install libsodium
cd ..
if ! grep -q sodium.so $(php --ini | grep Loaded | cut -d" " -f12); then
echo "extension=sodium.so" >> $(php --ini | grep Loaded | cut -d" " -f12)
fi
git clone https://github.com/karek314/bytebuffer/
git clone https://github.com/karek314/php-aes-gcm
git clone https://github.com/beberlei/assert
git clone https://github.com/symfony/polyfill-mbstring
|
karek314/lisk-php
|
setup_16.04.sh
|
Shell
|
mit
| 644 |
#!/bin/bash
# delete.sh - Made for Puppi
# Sources common header for Puppi scripts
. $(dirname $0)/header || exit 10
# Manage script variables
if [ $1 ] ; then
tobedeleted=$1
else
echo "You must provide a file or directory to delete!"
exit 2
fi
if [ "$tobedeleted" = "/" ] ; then
echo "Be Serious!"
exit 2
fi
# Move file
move () {
mkdir /tmp/puppi/$project/deleted
mv $tobedeleted /tmp/puppi/$project/deleted
}
move
|
jaytaph/symfony2-puppet-new
|
support/puppet/modules/puppi/files/scripts/delete.sh
|
Shell
|
mit
| 450 |
#!/bin/sh
set -e
umount /home/eax
cryptsetup luksClose eax_home
losetup -d /dev/loop0
|
afiskon/archlinux-on-desktop
|
home/umount.sh
|
Shell
|
mit
| 88 |
#!/bin/bash
# this file is applicable for each user that logs on (hence runs at log on time)
# prerequisites
#
# have copied the inner mac_os_scripts folder to /usr/local/zetta as well as the
# two bash scripts
# logging stuff
LOG_FILENAME=/tmp/mac_os_scripts_${USER}_run_during_logon.log
STDOUT_LOG_FILENAME=/tmp/mac_os_scripts_${USER}_run_during_logon_stdout.log
STDERR_LOG_FILENAME=/tmp/mac_os_scripts_${USER}_run_during_logon_stderr.log
source /usr/local/zetta/include.sh
log '!!!! started'
log 'setting some environment variables'
# fully qualified domain name of the file server
FQDN='grayfs01.grayman.com.au'
# path to the user's share ($USER is the name of the currently logged on user)
USER_SHARE_PATH="homedrives$/$USER"
# path to the group drive
GROUP_SHARE_PATH="groupdata"
# need to run scripts from here because of Python path requirements
run_and_log cd /usr/local/zetta/
run_and_log python -m mac_os_scripts.disable_handoff
# python -m mac_os_scripts.change_background # set in user template
run_and_log python -m mac_os_scripts.disable_airdrop
run_and_log python -m mac_os_scripts.enable_reduced_transparency
run_and_log python -m mac_os_scripts.disable_metadata_file_creation
run_and_log python -m mac_os_scripts.map_drive -f "$FQDN" -s "$USER_SHARE_PATH"
log '!!!! finished'
|
initialed85/mac_os_scripts
|
run_during_logon.sh
|
Shell
|
mit
| 1,306 |
#!/bin/bash
# $1 should be supplied with the command: mysql_restore_outfile.sh <database_name>
DBNAME=$1
# For now just set the Source DB the same as DBNAME, we can change this later
SOURCEDB=${DBNAME}
if [ "$USER" != "root" ]; then
echo "You are not root user, use: sudo backup"
exit
fi
clear
echo "|-------------------------------------------------------------"
echo "| Restoring MySQL Database From Backup "
echo "|-------------------------------------------------------------"
echo ""
if [ -z "$1" ]
then
echo "Destination DB is not defined, specify with command:"
echo "mysql_restore_outfile.sh database_name"
exit 1
fi
read -p "Are you sure you want to overwrite $1? " -n 1
if [[ $REPLY =~ ^[Yy]$ ]]; then
echo "==========================="
echo " Creating ${DBNAME}"
mysql -uroot -e "CREATE DATABASE IF NOT EXISTS ${DBNAME}"
else
exit 1
fi
echo "==========================="
echo " IMPORTING SCHEMA"
mysql -uroot ${DBNAME} < ${DBNAME}/schema.sql
PRIORITIES=( <%=mysql_restore_table_priorities%> )
for TABLE in "${PRIORITIES[@]}"
do
echo "==========================="
echo " IMPORT PRIORITY ${TABLE}"
mysql -uroot --database=${DBNAME} --execute="LOAD DATA LOCAL INFILE '${DBNAME}/${TABLE}' INTO TABLE ${TABLE}"
done
# Import the rest of the tables
TABLES=`ls -tr ${SOURCEDB}`
for TABLE in ${TABLES}
do
# Skip the schema dump and any table already imported as a priority above
if [[ "${TABLE}" == "schema.sql" || " ${PRIORITIES[*]} " == *" ${TABLE} "* ]]; then
    continue
fi
echo "==========================="
echo " IMPORT ${TABLE}"
mysql -uroot --database=${DBNAME} --execute="LOAD DATA LOCAL INFILE '${DBNAME}/${TABLE}' INTO TABLE ${TABLE}"
done
#clear
echo "|-------------------------------------------------------------"
echo "| Finished Restoring MySQL Database From Backup "
echo "|-------------------------------------------------------------"
echo ""
|
donnoman/cap-recipes
|
lib/cap_recipes/tasks/mysql/mysql_restore_outfile.sh
|
Shell
|
mit
| 1,951 |
#!/bin/bash
# Inspired by AngularJS's finalize-version script
# force the user to define git-push-dryrun so they have to think!
ARG_DEFS=(
"--git-push-dryrun=(true|false)"
"--action=(prepare|publish)"
)
function prepare {
cd ../..
# Remove suffix
OLD_VERSION=$(readJsonProp "package.json" "version")
VERSION=$(echo $OLD_VERSION | sed 's/-.*//')
replaceJsonProp "package.json" "version" "$VERSION"
CODENAME=$(readJsonProp "package.json" "codename")
replaceJsonProp "bower.json" "version" "$VERSION"
replaceJsonProp "component.json" "version" "$VERSION"
echo "-- Building and putting files in release folder"
grunt build
mkdir -p release
cp -Rf dist/* release
grunt changelog
git add package.json bower.json component.json release CHANGELOG.md
git commit -m "chore(release): v$VERSION \"$CODENAME\""
git tag -f -m "v$VERSION" v$VERSION
echo "--"
echo "-- Version is now $VERSION, codename $CODENAME."
echo "-- Release commit & tag created. Changelog created."
echo "-- Suggestion: read over the changelog and fix any mistakes, then run git commit -a --amend."
echo "-- When ready to push, run ./scripts/finalize-version.sh --action=publish"
echo "--"
}
function publish {
cd ../..
VERSION=$(readJsonProp "package.json" "version")
git push origin master
git push origin v$VERSION
echo "-- Version published as v$VERSION successfully!"
cd $SCRIPT_DIR
}
source $(dirname $0)/../utils.inc
|
1900/ionic
|
scripts/release/finalize-version.sh
|
Shell
|
mit
| 1,453 |
#!/bin/bash
# Docs by jazzy
# https://github.com/realm/jazzy
# ------------------------------
jazzy -o docs/ \
--source-directory . \
--readme README.md \
-a 'Jesse Squires' \
-u 'https://twitter.com/jesse_squires' \
-m 'DefaultStringConvertible' \
-g 'https://github.com/jessesquires/DefaultStringConvertible'
|
jessesquires/DefaultStringConvertible
|
build_docs.sh
|
Shell
|
mit
| 349 |
#! /bin/bash
function bintray-upload() {
URL="https://api.bintray.com/content/$BINTRAY_USER/deb/$PACKAGE/$VERSION/$DEB"
echo "Uploading $DEB"
curl -v --progress-bar -T $DEB -u$BINTRAY_USER:$BINTRAY_TOKEN $URL \
-H 'X-Bintray-Publish: 1' \
-H 'X-Bintray-Override: 1' \
-H 'X-Bintray-Debian-Distribution: stable' \
-H 'X-Bintray-Debian-Component: contrib' \
-H 'X-Bintray-Debian-Architecture: amd64'
echo
}
function bintray-create-version() {
URL="https://api.bintray.com/packages/$BINTRAY_USER/deb/$PACKAGE/versions"
echo "Creating version $VERSION for $PACKAGE"
curl -v -u$BINTRAY_USER:$BINTRAY_TOKEN $URL \
-d "{ \"name\": \"$VERSION\" }"
echo
}
|
gmatheu/deb-packages
|
bintray.sh
|
Shell
|
mit
| 692 |
#!/bin/sh
file="$1"
lval="$2"
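# Plot column 1 against the natural log of column 3 from "<file>-score-<lval>.out", with grid lines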
gnuplot -persist <<EOI
set grid
plot "$file-score-$lval.out" using 1:(log(\$3)) with linespoints
EOI
|
cianmj/nrc
|
numerov/plot-scan.sh
|
Shell
|
mit
| 131 |