code (stringlengths, 2-1.05M) | repo_name (stringlengths, 5-110) | path (stringlengths, 3-922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2-1.05M)
---|---|---|---|---|---|
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This command builds and runs a local kubernetes cluster.
if [ "$(which etcd)" == "" ]; then
echo "etcd must be in your PATH"
exit 1
fi
# Stop right away if the build fails
set -e
# Only build what we need
(
source "$(dirname "$0")/config-go.sh"
cd "${KUBE_TARGET}"
BINARIES="cloudcfg localkube"
for b in $BINARIES; do
echo "+++ Building ${b}"
go build "${KUBE_GO_PACKAGE}"/cmd/${b}
done
)
echo "Starting etcd"
ETCD_DIR=$(mktemp -d -t kube-integration.XXXXXX)
trap "rm -rf ${ETCD_DIR}" EXIT
(etcd -name test -data-dir ${ETCD_DIR} > /tmp/etcd.log) &
ETCD_PID=$!
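# Crude readiness wait: give etcd a few seconds to start accepting connections before localkube needs it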
sleep 5
echo "Running localkube as root (so it can talk to docker's unix socket)"
sudo "$(dirname "$0")/../output/go/localkube"
kill $ETCD_PID
| discordianfish/kubernetes | hack/local-up.sh | Shell | apache-2.0 | 1,336 |
#!/bin/bash
TARGET="/tmp/elasticsearch"
if [ ! -f "$TARGET/elasticsearch-1.6.2/bin/elasticsearch" ]; then
echo "$TARGET not found. Building..."
# Create the target directory first; pushd fails if it does not exist yet
mkdir -p "$TARGET"
pushd "$TARGET"
wget https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.6.2.tar.gz
tar xvf elasticsearch-1.6.2.tar.gz
elasticsearch-1.6.2/bin/plugin -install elasticsearch/elasticsearch-analysis-icu/2.6.0
popd
else
echo "$TARGET already exists"
fi
| jasonthomas/zamboni | scripts/travis_es.sh | Shell | bsd-3-clause | 433 |
#!/bin/bash -ex
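# Run the full Stackage pipeline: select the package set (extra args pass through), then check, build, and test it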
runghc app/stackage.hs select "$@"
runghc app/stackage.hs check
runghc app/stackage.hs build
runghc app/stackage.hs test
| pmiddend/stackage | full-run.sh | Shell | mit | 136 |
#!/bin/sh
test_description='test separate work tree'
. ./test-lib.sh
test_expect_success 'setup' '
EMPTY_TREE=$(git write-tree) &&
EMPTY_BLOB=$(git hash-object -t blob --stdin </dev/null) &&
CHANGED_BLOB=$(echo changed | git hash-object -t blob --stdin) &&
EMPTY_BLOB7=$(echo $EMPTY_BLOB | sed "s/\(.......\).*/\1/") &&
CHANGED_BLOB7=$(echo $CHANGED_BLOB | sed "s/\(.......\).*/\1/") &&
mkdir -p work/sub/dir &&
mkdir -p work2 &&
mv .git repo.git
'
test_expect_success 'setup: helper for testing rev-parse' '
test_rev_parse() {
echo $1 >expected.bare &&
echo $2 >expected.inside-git &&
echo $3 >expected.inside-worktree &&
if test $# -ge 4
then
echo $4 >expected.prefix
fi &&
git rev-parse --is-bare-repository >actual.bare &&
git rev-parse --is-inside-git-dir >actual.inside-git &&
git rev-parse --is-inside-work-tree >actual.inside-worktree &&
if test $# -ge 4
then
git rev-parse --show-prefix >actual.prefix
fi &&
test_cmp expected.bare actual.bare &&
test_cmp expected.inside-git actual.inside-git &&
test_cmp expected.inside-worktree actual.inside-worktree &&
if test $# -ge 4
then
# rev-parse --show-prefix should output
# a single newline when at the top of the work tree,
# but we test for that separately.
test -z "$4" && ! test -s actual.prefix ||
test_cmp expected.prefix actual.prefix
fi
}
'
test_expect_success 'setup: core.worktree = relative path' '
sane_unset GIT_WORK_TREE &&
GIT_DIR=repo.git &&
GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
export GIT_DIR GIT_CONFIG &&
git config core.worktree ../work
'
test_expect_success 'outside' '
test_rev_parse false false false
'
test_expect_success 'inside work tree' '
(
cd work &&
GIT_DIR=../repo.git &&
GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
test_rev_parse false false true ""
)
'
test_expect_failure 'empty prefix is actually written out' '
echo >expected &&
(
cd work &&
GIT_DIR=../repo.git &&
GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
git rev-parse --show-prefix >../actual
) &&
test_cmp expected actual
'
test_expect_success 'subdir of work tree' '
(
cd work/sub/dir &&
GIT_DIR=../../../repo.git &&
GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
test_rev_parse false false true sub/dir/
)
'
test_expect_success 'setup: core.worktree = absolute path' '
sane_unset GIT_WORK_TREE &&
GIT_DIR=$(pwd)/repo.git &&
GIT_CONFIG=$GIT_DIR/config &&
export GIT_DIR GIT_CONFIG &&
git config core.worktree "$(pwd)/work"
'
test_expect_success 'outside' '
test_rev_parse false false false &&
(
cd work2 &&
test_rev_parse false false false
)
'
test_expect_success 'inside work tree' '
(
cd work &&
test_rev_parse false false true ""
)
'
test_expect_success 'subdir of work tree' '
(
cd work/sub/dir &&
test_rev_parse false false true sub/dir/
)
'
test_expect_success 'setup: GIT_WORK_TREE=relative (override core.worktree)' '
GIT_DIR=$(pwd)/repo.git &&
GIT_CONFIG=$GIT_DIR/config &&
git config core.worktree non-existent &&
GIT_WORK_TREE=work &&
export GIT_DIR GIT_CONFIG GIT_WORK_TREE
'
test_expect_success 'outside' '
test_rev_parse false false false &&
(
cd work2 &&
test_rev_parse false false false
)
'
test_expect_success 'inside work tree' '
(
cd work &&
GIT_WORK_TREE=. &&
test_rev_parse false false true ""
)
'
test_expect_success 'subdir of work tree' '
(
cd work/sub/dir &&
GIT_WORK_TREE=../.. &&
test_rev_parse false false true sub/dir/
)
'
test_expect_success 'setup: GIT_WORK_TREE=absolute, below git dir' '
mv work repo.git/work &&
mv work2 repo.git/work2 &&
GIT_DIR=$(pwd)/repo.git &&
GIT_CONFIG=$GIT_DIR/config &&
GIT_WORK_TREE=$(pwd)/repo.git/work &&
export GIT_DIR GIT_CONFIG GIT_WORK_TREE
'
test_expect_success 'outside' '
echo outside &&
test_rev_parse false false false
'
test_expect_success 'in repo.git' '
(
cd repo.git &&
test_rev_parse false true false
) &&
(
cd repo.git/objects &&
test_rev_parse false true false
) &&
(
cd repo.git/work2 &&
test_rev_parse false true false
)
'
test_expect_success 'inside work tree' '
(
cd repo.git/work &&
test_rev_parse false true true ""
)
'
test_expect_success 'subdir of work tree' '
(
cd repo.git/work/sub/dir &&
test_rev_parse false true true sub/dir/
)
'
test_expect_success 'find work tree from repo' '
echo sub/dir/untracked >expected &&
cat <<-\EOF >repo.git/work/.gitignore &&
expected.*
actual.*
.gitignore
EOF
>repo.git/work/sub/dir/untracked &&
(
cd repo.git &&
git ls-files --others --exclude-standard >../actual
) &&
test_cmp expected actual
'
test_expect_success 'find work tree from work tree' '
echo sub/dir/tracked >expected &&
>repo.git/work/sub/dir/tracked &&
(
cd repo.git/work/sub/dir &&
git --git-dir=../../.. add tracked
) &&
(
cd repo.git &&
git ls-files >../actual
) &&
test_cmp expected actual
'
test_expect_success '_gently() groks relative GIT_DIR & GIT_WORK_TREE' '
(
cd repo.git/work/sub/dir &&
GIT_DIR=../../.. &&
GIT_WORK_TREE=../.. &&
GIT_PAGER= &&
export GIT_DIR GIT_WORK_TREE GIT_PAGER &&
git diff --exit-code tracked &&
echo changed >tracked &&
test_must_fail git diff --exit-code tracked
)
'
test_expect_success 'diff-index respects work tree under .git dir' '
cat >diff-index-cached.expected <<-EOF &&
:000000 100644 $_z40 $EMPTY_BLOB A sub/dir/tracked
EOF
cat >diff-index.expected <<-EOF &&
:000000 100644 $_z40 $_z40 A sub/dir/tracked
EOF
(
GIT_DIR=repo.git &&
GIT_WORK_TREE=repo.git/work &&
export GIT_DIR GIT_WORK_TREE &&
git diff-index $EMPTY_TREE >diff-index.actual &&
git diff-index --cached $EMPTY_TREE >diff-index-cached.actual
) &&
test_cmp diff-index.expected diff-index.actual &&
test_cmp diff-index-cached.expected diff-index-cached.actual
'
test_expect_success 'diff-files respects work tree under .git dir' '
cat >diff-files.expected <<-EOF &&
:100644 100644 $EMPTY_BLOB $_z40 M sub/dir/tracked
EOF
(
GIT_DIR=repo.git &&
GIT_WORK_TREE=repo.git/work &&
export GIT_DIR GIT_WORK_TREE &&
git diff-files >diff-files.actual
) &&
test_cmp diff-files.expected diff-files.actual
'
test_expect_success 'git diff respects work tree under .git dir' '
cat >diff-TREE.expected <<-EOF &&
diff --git a/sub/dir/tracked b/sub/dir/tracked
new file mode 100644
index 0000000..$CHANGED_BLOB7
--- /dev/null
+++ b/sub/dir/tracked
@@ -0,0 +1 @@
+changed
EOF
cat >diff-TREE-cached.expected <<-EOF &&
diff --git a/sub/dir/tracked b/sub/dir/tracked
new file mode 100644
index 0000000..$EMPTY_BLOB7
EOF
cat >diff-FILES.expected <<-EOF &&
diff --git a/sub/dir/tracked b/sub/dir/tracked
index $EMPTY_BLOB7..$CHANGED_BLOB7 100644
--- a/sub/dir/tracked
+++ b/sub/dir/tracked
@@ -0,0 +1 @@
+changed
EOF
(
GIT_DIR=repo.git &&
GIT_WORK_TREE=repo.git/work &&
export GIT_DIR GIT_WORK_TREE &&
git diff $EMPTY_TREE >diff-TREE.actual &&
git diff --cached $EMPTY_TREE >diff-TREE-cached.actual &&
git diff >diff-FILES.actual
) &&
test_cmp diff-TREE.expected diff-TREE.actual &&
test_cmp diff-TREE-cached.expected diff-TREE-cached.actual &&
test_cmp diff-FILES.expected diff-FILES.actual
'
test_expect_success 'git grep' '
echo dir/tracked >expected.grep &&
(
cd repo.git/work/sub &&
GIT_DIR=../.. &&
GIT_WORK_TREE=.. &&
export GIT_DIR GIT_WORK_TREE &&
git grep -l changed >../../../actual.grep
) &&
test_cmp expected.grep actual.grep
'
test_expect_success 'git commit' '
(
cd repo.git &&
GIT_DIR=. GIT_WORK_TREE=work git commit -a -m done
)
'
test_expect_success 'absolute pathspec should fail gracefully' '
(
cd repo.git &&
test_might_fail git config --unset core.worktree &&
test_must_fail git log HEAD -- /home
)
'
test_expect_success 'make_relative_path handles double slashes in GIT_DIR' '
>dummy_file &&
echo git --git-dir="$(pwd)//repo.git" --work-tree="$(pwd)" add dummy_file &&
git --git-dir="$(pwd)//repo.git" --work-tree="$(pwd)" add dummy_file
'
test_expect_success 'relative $GIT_WORK_TREE and git subprocesses' '
GIT_DIR=repo.git GIT_WORK_TREE=repo.git/work \
test-subprocess --setup-work-tree rev-parse --show-toplevel >actual &&
echo "$(pwd)/repo.git/work" >expected &&
test_cmp expected actual
'
test_done
| schaary/studipET2012 | t/t1501-worktree.sh | Shell | gpl-2.0 | 8,220 |
#!/bin/bash
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run Neutron's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger."
echo " -h, --help Print this usage message"
echo " --virtual-env-path <path> Location of the virtualenv directory"
echo " Default: \$(pwd)"
echo " --virtual-env-name <name> Name of the virtualenv directory"
echo " Default: .venv"
echo " --tools-path <dir> Location of the tools directory"
echo " Default: \$(pwd)"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
function process_options {
i=1
while [ $i -le $# ]; do
case "${!i}" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-s|--no-site-packages) no_site_packages=1;;
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
-f|--force) force=1;;
-u|--update) update=1;;
-p|--pep8) just_pep8=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-d|--debug) debug=1;;
--virtual-env-path)
(( i++ ))
venv_path=${!i}
;;
--virtual-env-name)
(( i++ ))
venv_dir=${!i}
;;
--tools-path)
(( i++ ))
tools_path=${!i}
;;
-*) testropts="$testropts ${!i}";;
*) testrargs="$testrargs ${!i}"
esac
(( i++ ))
done
}
tools_path=${tools_path:-$(pwd)}
venv_path=${venv_path:-$(pwd)}
venv_dir=${venv_dir:-.venv}
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
no_site_packages=0
installvenvopts=
testrargs=
testropts=
wrapper=""
just_pep8=0
no_pep8=0
coverage=0
debug=0
recreate_db=1
update=0
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
process_options "$@"
# Make our paths available to other scripts we call
export venv_path
export venv_dir
export tools_path
export venv=${venv_path}/${venv_dir}
if [ $no_site_packages -eq 1 ]; then
installvenvopts="--no-site-packages"
fi
function run_tests {
# Cleanup *pyc
${wrapper} find . -type f -name "*.pyc" -delete
if [ $debug -eq 1 ]; then
if [ "$testropts" = "" ] && [ "$testrargs" = "" ]; then
# Default to running all tests if specific test is not
# provided.
testrargs="discover ./neutron/tests"
fi
${wrapper} python -m testtools.run $testropts $testrargs
# Short circuit because all of the testr and coverage stuff
# below does not make sense when running testtools.run for
# debugging purposes.
return $?
fi
if [ $coverage -eq 1 ]; then
TESTRTESTS="$TESTRTESTS --coverage"
else
TESTRTESTS="$TESTRTESTS --slowest"
fi
# Just run the test suites in current environment
set +e
testrargs=`echo "$testrargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testropts $testrargs'"
OS_TEST_PATH=`echo $testrargs|grep -o 'neutron\.tests[^[:space:]:]*\+'|tr . /`
if [ -d "$OS_TEST_PATH" ]; then
wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper"
elif [ -d "$(dirname $OS_TEST_PATH)" ]; then
wrapper="OS_TEST_PATH=$(dirname $OS_TEST_PATH) $wrapper"
fi
echo "Running \`${wrapper} $TESTRTESTS\`"
bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit"
RESULT=$?
set -e
copy_subunit_log
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
# Don't compute coverage for common code, which is tested elsewhere
${wrapper} coverage combine
${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i
fi
return $RESULT
}
function copy_subunit_log {
LOGNAME=`cat .testrepository/next-stream`
LOGNAME=$(($LOGNAME - 1))
LOGNAME=".testrepository/${LOGNAME}"
cp $LOGNAME subunit.log
}
function run_pep8 {
echo "Running flake8 ..."
${wrapper} flake8
}
TESTRTESTS="python -m neutron.openstack.common.lockutils python setup.py testr"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py $installvenvopts
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py $installvenvopts
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py $installvenvopts
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_pep8 -eq 1 ]; then
run_pep8
exit
fi
if [ $recreate_db -eq 1 ]; then
rm -f tests.sqlite
fi
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
# not when we're running tests individually. To handle this, we need to
# distinguish between options (testropts), which begin with a '-', and
# arguments (testrargs).
if [ -z "$testrargs" ]; then
if [ $no_pep8 -eq 0 ]; then
run_pep8
fi
fi
| shakamunyi/neutron-vrrp | run_tests.sh | Shell | apache-2.0 | 6,737 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example:
# export KUBERNETES_PROVIDER=mesos/docker
# go run hack/e2e.go -v -up -check_cluster_size=false
# go run hack/e2e.go -v -test -check_version_skew=false
# go run hack/e2e.go -v -down
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
KUBE_ROOT=$(cd "$(dirname "${BASH_SOURCE}")/../../.." && pwd)
provider_root="${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}"
source "${provider_root}/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
# Execute a docker-compose command with the default environment and compose file.
function cluster::mesos::docker::docker_compose {
local params="$@"
# All vars required to be set
declare -a env_vars=(
"KUBE_KEYGEN_TIMEOUT"
"MESOS_DOCKER_ETCD_TIMEOUT"
"MESOS_DOCKER_MESOS_TIMEOUT"
"MESOS_DOCKER_API_TIMEOUT"
"MESOS_DOCKER_ADDON_TIMEOUT"
"MESOS_DOCKER_WORK_DIR"
"DOCKER_DAEMON_ARGS"
)
(
for var_name in "${env_vars[@]}"; do
export ${var_name}="${!var_name}"
done
docker-compose -f "${provider_root}/docker-compose.yml" ${params}
)
}
# Pull the images from a docker compose file, if they're not already cached.
# This avoid slow remote calls from `docker-compose pull` which delegates
# to `docker pull` which always hits the remote docker repo, even if the image
# is already cached.
function cluster::mesos::docker::docker_compose_lazy_pull {
for img in $(grep '^\s*image:\s' "${provider_root}/docker-compose.yml" | sed 's/[ \t]*image:[ \t]*//'); do
read repo tag <<<$(echo "${img} "| sed 's/:/ /')
if [ -z "${tag}" ]; then
tag="latest"
fi
if ! docker images "${repo}" | awk '{print $2;}' | grep -q "${tag}"; then
docker pull "${img}"
fi
done
}
# Run kubernetes scripts inside docker.
# This bypasses the need to set up network routing when running docker in a VM (e.g. boot2docker).
# Trap signals and kills the docker container for better signal handing
function cluster::mesos::docker::run_in_docker_test {
local entrypoint="$1"
if [[ "${entrypoint}" = "./"* ]]; then
# relative to project root
entrypoint="/go/src/github.com/GoogleCloudPlatform/kubernetes/${entrypoint}"
fi
shift
local args="$@"
# only mount KUBECONFIG if it exists, otherwise the directory will be created/owned by root
kube_config_mount=""
if [ -n "${KUBECONFIG:-}" ] && [ -e "${KUBECONFIG}" ]; then
kube_config_mount="-v \"$(dirname ${KUBECONFIG}):/root/.kube\""
fi
docker run \
--rm \
-t $(tty &>/dev/null && echo "-i") \
-e "KUBERNETES_PROVIDER=${KUBERNETES_PROVIDER}" \
-v "${KUBE_ROOT}:/go/src/github.com/GoogleCloudPlatform/kubernetes" \
${kube_config_mount} \
-v "/var/run/docker.sock:/var/run/docker.sock" \
--link docker_mesosmaster1_1:mesosmaster1 \
--link docker_apiserver_1:apiserver \
--entrypoint="${entrypoint}" \
mesosphere/kubernetes-mesos-test \
${args}
return "$?"
}
# Run kube-cagen.sh inside docker.
# Creating and signing in the same environment avoids a subject comparison string_mask issue.
function cluster::mesos::docker::run_in_docker_cagen {
local out_dir="$1"
docker run \
--rm \
-t $(tty &>/dev/null && echo "-i") \
-v "${out_dir}:/var/run/kubernetes/auth" \
mesosphere/kubernetes-keygen:v1.0.0 \
"cagen" \
"/var/run/kubernetes/auth"
return "$?"
}
# Run kube-keygen.sh inside docker.
function cluster::mesos::docker::run_in_docker_keygen {
local out_file_path="$1"
local out_dir="$(dirname "${out_file_path}")"
local out_file="$(basename "${out_file_path}")"
docker run \
--rm \
-t $(tty &>/dev/null && echo "-i") \
-v "${out_dir}:/var/run/kubernetes/auth" \
mesosphere/kubernetes-keygen:v1.0.0 \
"keygen" \
"/var/run/kubernetes/auth/${out_file}"
return "$?"
}
# Generate kubeconfig data for the created cluster.
function create-kubeconfig {
local -r auth_dir="${MESOS_DOCKER_WORK_DIR}/auth"
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
export CONTEXT="${KUBERNETES_PROVIDER}"
export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
# KUBECONFIG determines the file we write to, but it may not exist yet
if [[ ! -e "${KUBECONFIG}" ]]; then
mkdir -p $(dirname "${KUBECONFIG}")
touch "${KUBECONFIG}"
fi
local token="$(cut -d, -f1 ${auth_dir}/token-users)"
"${kubectl}" config set-cluster "${CONTEXT}" --server="${KUBE_SERVER}" --certificate-authority="${auth_dir}/root-ca.crt"
"${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="cluster-admin"
"${kubectl}" config set-credentials cluster-admin --token="${token}"
"${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}"
echo "Wrote config for ${CONTEXT} to ${KUBECONFIG}" 1>&2
}
# Perform preparations required to run e2e tests
function prepare-e2e {
echo "TODO: prepare-e2e" 1>&2
}
# Execute prior to running tests to build a release if required for env
function test-build-release {
# Make a release
export KUBERNETES_CONTRIB=mesos
export KUBE_RELEASE_RUN_TESTS=N
"${KUBE_ROOT}/build/release.sh"
}
# Must ensure that the following ENV vars are set
function detect-master {
# echo "KUBE_MASTER: $KUBE_MASTER" 1>&2
local docker_id=$(docker ps --filter="name=docker_apiserver" --quiet)
if [[ "${docker_id}" == *'\n'* ]]; then
echo "ERROR: Multiple API Servers running" 1>&2
return 1
fi
master_ip=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" "${docker_id}")
master_port=6443
KUBE_MASTER_IP="${master_ip}:${master_port}"
KUBE_SERVER="https://${KUBE_MASTER_IP}"
echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" 1>&2
}
# Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
# These Mesos slaves MAY host Kublets,
# but might not have a Kublet running unless a kubernetes task has been scheduled on them.
function detect-nodes {
local docker_ids=$(docker ps --filter="name=docker_mesosslave" --quiet)
if [ -z "${docker_ids}" ]; then
echo "ERROR: Mesos slave(s) not running" 1>&2
return 1
fi
while read -r docker_id; do
local minion_ip=$(docker inspect --format="{{.NetworkSettings.IPAddress}}" "${docker_id}")
KUBE_NODE_IP_ADDRESSES+=("${minion_ip}")
done <<< "$docker_ids"
echo "KUBE_NODE_IP_ADDRESSES: [${KUBE_NODE_IP_ADDRESSES[*]}]" 1>&2
}
# Verify prereqs on host machine
function verify-prereqs {
echo "Verifying required commands" 1>&2
hash docker 2>/dev/null || { echo "Missing required command: docker" 1>&2; exit 1; }
hash docker-compose 2>/dev/null || { echo "Missing required command: docker-compose" 1>&2; exit 1; }
}
# Initialize
function cluster::mesos::docker::init_auth {
local -r auth_dir="${MESOS_DOCKER_WORK_DIR}/auth"
#TODO(karlkfi): reuse existing credentials/certs/keys
# Nuke old auth
echo "Creating Auth Dir: ${auth_dir}" 1>&2
mkdir -p "${auth_dir}"
rm -rf "${auth_dir}"/*
echo "Creating Certificate Authority" 1>&2
cluster::mesos::docker::buffer_output cluster::mesos::docker::run_in_docker_cagen "${auth_dir}"
echo "Certificate Authority Key: ${auth_dir}/root-ca.key" 1>&2
echo "Certificate Authority Cert: ${auth_dir}/root-ca.crt" 1>&2
echo "Creating Service Account RSA Key" 1>&2
cluster::mesos::docker::buffer_output cluster::mesos::docker::run_in_docker_keygen "${auth_dir}/service-accounts.key"
echo "Service Account Key: ${auth_dir}/service-accounts.key" 1>&2
echo "Creating User Accounts" 1>&2
cluster::mesos::docker::create_token_user "cluster-admin" > "${auth_dir}/token-users"
echo "Token Users: ${auth_dir}/token-users" 1>&2
cluster::mesos::docker::create_basic_user "admin" "admin" > "${auth_dir}/basic-users"
echo "Basic-Auth Users: ${auth_dir}/basic-users" 1>&2
}
# Instantiate a kubernetes cluster.
function kube-up {
# Nuke old mesos-slave workspaces
local work_dir="${MESOS_DOCKER_WORK_DIR}/mesosslave"
echo "Creating Mesos Work Dir: ${work_dir}" 1>&2
mkdir -p "${work_dir}"
rm -rf "${work_dir}"/*
# Nuke old logs
local -r log_dir="${MESOS_DOCKER_WORK_DIR}/log"
mkdir -p "${log_dir}"
rm -rf "${log_dir}"/*
# Pull before `docker-compose up` to avoid timeouts caused by slow pulls during deployment.
echo "Pulling Docker images" 1>&2
cluster::mesos::docker::docker_compose_lazy_pull
if [ "${MESOS_DOCKER_SKIP_BUILD}" != "true" ]; then
echo "Building Docker images" 1>&2
# TODO: version images (k8s version, git sha, and dirty state) to avoid re-building them every time.
"${provider_root}/km/build.sh"
"${provider_root}/test/build.sh"
fi
cluster::mesos::docker::init_auth
# Dump logs on premature exit (errexit triggers exit).
# Trap EXIT instead of ERR, because ERR can trigger multiple times with errtrace enabled.
trap "cluster::mesos::docker::dump_logs '${log_dir}'" EXIT
echo "Starting ${KUBERNETES_PROVIDER} cluster" 1>&2
cluster::mesos::docker::docker_compose up -d
echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_NODES} slaves"
cluster::mesos::docker::docker_compose scale mesosslave=${NUM_NODES}
# await-health-check requires GNU timeout
# apiserver hostname resolved by docker
cluster::mesos::docker::run_in_docker_test await-health-check "-t=${MESOS_DOCKER_API_TIMEOUT}" http://apiserver:8888/healthz
detect-master
detect-nodes
create-kubeconfig
echo "Deploying Addons" 1>&2
KUBE_SERVER=${KUBE_SERVER} "${provider_root}/deploy-addons.sh"
# Wait for addons to deploy
cluster::mesos::docker::await_ready "kube-dns" "${MESOS_DOCKER_ADDON_TIMEOUT}"
cluster::mesos::docker::await_ready "kube-ui" "${MESOS_DOCKER_ADDON_TIMEOUT}"
trap - EXIT
}
function validate-cluster {
echo "Validating ${KUBERNETES_PROVIDER} cluster" 1>&2
# Do not validate cluster size. There will be zero k8s minions until a pod is created.
# TODO(karlkfi): use componentstatuses or equivalent when it supports non-localhost core components
# Validate immediate cluster reachability and responsiveness
echo "KubeDNS: $(cluster::mesos::docker::addon_status 'kube-dns')"
echo "KubeUI: $(cluster::mesos::docker::addon_status 'kube-ui')"
}
# Delete a kubernetes cluster
function kube-down {
if [ "${MESOS_DOCKER_DUMP_LOGS}" == "true" ]; then
cluster::mesos::docker::dump_logs "${MESOS_DOCKER_WORK_DIR}/log"
fi
echo "Stopping ${KUBERNETES_PROVIDER} cluster" 1>&2
# Since restoring a stopped cluster is not yet supported, use the nuclear option
cluster::mesos::docker::docker_compose kill
cluster::mesos::docker::docker_compose rm -f
}
function test-setup {
echo "TODO: test-setup" 1>&2
}
# Execute after running tests to perform any required clean-up
function test-teardown {
echo "test-teardown" 1>&2
kube-down
}
## Below functions used by hack/e2e-suite/services.sh
# SSH to a node by name or IP ($1) and run a command ($2).
function ssh-to-node {
echo "TODO: ssh-to-node" 1>&2
}
# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
echo "TODO: restart-kube-proxy" 1>&2
}
# Restart the apiserver
function restart-apiserver {
echo "TODO: restart-apiserver" 1>&2
}
# Waits for a kube-system pod (of the provided name) to have the phase/status "Running".
function cluster::mesos::docker::await_ready {
local pod_name="$1"
local max_attempts="$2"
local phase="Unknown"
echo -n "${pod_name}: "
local n=0
until [ ${n} -ge ${max_attempts} ]; do
phase=$(cluster::mesos::docker::addon_status "${pod_name}")
if [ "${phase}" == "Running" ]; then
break
fi
echo -n "."
n=$[$n+1]
sleep 1
done
echo "${phase}"
return $([ "${phase}" == "Running" ]; echo $?)
}
# Prints the status of the kube-system pod specified
function cluster::mesos::docker::addon_status {
local pod_name="$1"
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
local phase=$("${kubectl}" get pods --namespace=kube-system -l k8s-app=${pod_name} -o template --template="{{(index .items 0).status.phase}}" 2>/dev/null)
phase="${phase:-Unknown}"
echo "${phase}"
}
function cluster::mesos::docker::dump_logs {
local out_dir="$1"
echo "Dumping logs to '${out_dir}'" 1>&2
mkdir -p "${out_dir}"
while read name; do
docker logs "${name}" &> "${out_dir}/${name}.log"
done < <(cluster::mesos::docker::docker_compose ps -q | xargs docker inspect --format '{{.Name}}')
}
# Creates a k8s token auth user file.
# See /docs/admin/authentication.md
function cluster::mesos::docker::create_token_user {
local user_name="$1"
echo "$(openssl rand -hex 32),${user_name},${user_name}"
}
# Creates a k8s basic auth user file.
# See /docs/admin/authentication.md
function cluster::mesos::docker::create_basic_user {
local user_name="$1"
local password="$2"
echo "${password},${user_name},${user_name}"
}
# Buffers command output to file, prints output on failure.
function cluster::mesos::docker::buffer_output {
local cmd="$@"
local tempfile="$(mktemp "${TMPDIR:-/tmp}/buffer.XXXXXX")"
trap "kill -TERM \${PID}; rm '${tempfile}'" TERM INT
set +e
${cmd} &> "${tempfile}" &
PID=$!
wait ${PID}
trap - TERM INT
wait ${PID}
local exit_status="$?"
set -e
if [ "${exit_status}" != 0 ]; then
cat "${tempfile}" 1>&2
fi
rm "${tempfile}"
return "${exit_status}"
}
| dcbw/kubernetes | cluster/mesos/docker/util.sh | Shell | apache-2.0 | 13,790 |
#!/bin/sh
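# Start flora_pac on port 8970, routing matched traffic through the local SOCKS5/SOCKS proxy on 127.0.0.1:8964, with DIRECT as the fallback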
./flora_pac -x 'SOCKS5 127.0.0.1:8964; SOCKS 127.0.0.1:8964; DIRECT' -p 8970
| crysislinux/Flora_Pac | test_server.sh | Shell | mit | 88 |
# Let's verify that the tools we need are installed
verify_aws_cli() {
declare -a required=(aws)
for cmd in "${required[@]}"; do
command -v $cmd >/dev/null 2>&1 || {
echo "'$cmd' must be installed" >&2
exit 1
}
done
}
#--------------------------------------------------------------------
# Bats modification
#--------------------------------------------------------------------
# This allows us to override a function in Bash
save_function() {
local ORIG_FUNC=$(declare -f $1)
local NEWNAME_FUNC="$2${ORIG_FUNC#$1}"
eval "$NEWNAME_FUNC"
}
# Override the run function so that we always output the output
save_function run old_run
run() {
old_run "$@"
# Output the command we ran
echo "Executing:" "$@"
# "$output" gets rid of newlines. This will bring them back.
for line in "${lines[@]}"; do
echo "$line"
done
}
#--------------------------------------------------------------------
# Helper functions
#--------------------------------------------------------------------
# This sets the directory for fixtures by specifying the name of
# the folder with fixtures.
fixtures() {
FIXTURE_ROOT="$BATS_TEST_DIRNAME/fixtures/$1"
}
# This deletes any AMIs with a tag "packer-test" of "true"
aws_ami_cleanup() {
local region=${1:-us-east-1}
aws ec2 describe-images --region ${region} --owners self --output text \
--filters 'Name=tag:packer-test,Values=true' \
--query 'Images[*].ImageId' \
| xargs -n1 aws ec2 deregister-image --region ${region} --image-id
}
| dave2/packer | test/test_helper.bash | Shell | mpl-2.0 | 1,585 |
#!/bin/bash
fw_depends urweb mysql
export URWEB_HOME=${IROOT}/urweb
export LD_LIBRARY_PATH=${URWEB_HOME}/lib
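# Compile the Ur/Web bench app against MySQL, pointing it at the benchmark database on DBHOST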
${URWEB_HOME}/bin/urweb -dbms mysql -db "dbname=hello_world user=benchmarkdbuser password=benchmarkdbpass host=${DBHOST}" bench
MAX_THREADS=$((2 * $CPU_COUNT))
./bench.exe -q -k -t ${MAX_THREADS} &
| mfirry/FrameworkBenchmarks | frameworks/Ur/urweb/setup_mysql.sh | Shell | bsd-3-clause | 311 |
#!/usr/bin/env bash
set -e
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs=(
'/proc/config.gz'
"/boot/config-$(uname -r)"
"/usr/src/linux-$(uname -r)/.config"
'/usr/src/linux/.config'
)
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: ${CONFIG:="${possibleConfigs[0]}"}
fi
if ! command -v zgrep &> /dev/null; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
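# A kernel option counts as available whether built in (=y) or as a module (=m)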
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
declare -A colors=(
[black]=30
[red]=31
[green]=32
[yellow]=33
[blue]=34
[magenta]=35
[cyan]=36
[white]=37
)
color() {
color=()
if [ "$1" = 'bold' ]; then
color+=( '1' )
shift
fi
if [ $# -gt 0 ] && [ "${colors[$1]}" ]; then
color+=( "${colors[$1]}" )
fi
local IFS=';'
echo -en '\033['"${color[*]}"m
}
wrap_color() {
text="$1"
shift
color "$@"
echo -n "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set "$1"; then
wrap_good "CONFIG_$1" 'enabled'
else
wrap_bad "CONFIG_$1" 'missing'
fi
}
check_flags() {
for flag in "$@"; do
echo "- $(check_flag "$flag")"
done
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config..."
for tryConfig in "${possibleConfigs[@]}"; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
echo -n '- '
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')"
fi
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
echo -n '- '
if command -v apparmor_parser &> /dev/null; then
echo "$(wrap_good 'apparmor' 'enabled and tools installed')"
else
echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')"
echo -n ' '
if command -v apt-get &> /dev/null; then
echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')"
elif command -v yum &> /dev/null; then
echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')"
else
echo "$(wrap_color '(look for an "apparmor" package for your distribution)')"
fi
fi
fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
DEVPTS_MULTIPLE_INSTANCES
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS
MACVLAN VETH BRIDGE
NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK}
NF_NAT NF_NAT_NEEDED
# required for bind-mounting /dev/mqueue into containers
POSIX_MQUEUE
)
check_flags "${flags[@]}"
echo
echo 'Optional Features:'
{
check_flags MEMCG_SWAP
check_flags MEMCG_SWAP_ENABLED
if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
}
flags=(
RESOURCE_COUNTERS
CGROUP_PERF
CFS_BANDWIDTH
)
check_flags "${flags[@]}"
echo '- Storage Drivers:'
{
echo '- "'$(wrap_color 'aufs' blue)'":'
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
check_flags EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /'
echo '- "'$(wrap_color 'btrfs' blue)'":'
check_flags BTRFS_FS | sed 's/^/ /'
echo '- "'$(wrap_color 'devicemapper' blue)'":'
check_flags BLK_DEV_DM DM_THIN_PROVISIONING EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY | sed 's/^/ /'
echo '- "'$(wrap_color 'overlay' blue)'":'
check_flags OVERLAY_FS EXT4_FS_SECURITY EXT4_FS_POSIX_ACL | sed 's/^/ /'
} | sed 's/^/ /'
echo
#echo 'Potential Future Features:'
#check_flags USER_NS
#echo
| Ninir/docker | contrib/check-config.sh | Shell | apache-2.0 | 4,893 |
#!/bin/bash
export PHP_HOME=${IROOT}/php-5.5.17
export COMPOSER_HOME=${IROOT}/php-composer
export PHP_FPM=${PHP_HOME}/sbin/php-fpm
export NGINX_HOME=${IROOT}/nginx
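# Rewrite the bundled configs in place: point the app at DBHOST and fix the nginx root and install paths for this environment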
sed -i 's|127.0.0.1|'"${DBHOST}"'|g' apps/configs/database.php
sed -i 's|root .*/FrameworkBenchmarks/cygnite-php-framework|root '"${TROOT}"'|g' deploy/nginx.conf
sed -i 's|/usr/local/nginx/|'"${IROOT}"'/nginx/|g' deploy/nginx.conf
export PATH="${PHP_HOME}/bin:${PHP_HOME}/sbin:$PATH"
$PHP_FPM --fpm-config ${FWROOT}/config/php-fpm.conf -g ${TROOT}/deploy/php-fpm.pid
${NGINX_HOME}/sbin/nginx -c ${TROOT}/deploy/nginx.conf
| fabianmurariu/FrameworkBenchmarks | frameworks/PHP/cygnite-php-framework/setup.sh | Shell | bsd-3-clause | 590 |
#
# -- START --
# preremove.solaris.sh,v 1.1 2001/08/21 20:33:17 root Exp
#
# This is the shell script that does the preremove
echo RUNNING preremove.solaris.sh
if [ "$VERBOSE_INSTALL" != "" ] ; then set -x; fi
echo "Stopping LPD"
pkill -INT lpd
exit 0
| alcobar/asuswrt-merlin | release/src/router/LPRng/preremove.solaris.sh | Shell | gpl-2.0 | 253 |
#!/bin/bash
# Copyright (c) 2013 Intel Corporation.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Authors:
# IVAN CHEN <[email protected]>
local_path=$(cd "$(dirname $0)";pwd)
#monitor sys
WEBAPP_PACKAGE="org.xwalk.playmusic"
SLEEP=14400
$local_path/sysmon.sh `(basename $0)` $SLEEP $WEBAPP_PACKAGE &
#launch app
adb shell am start -a android.intent.action.VIEW -n $WEBAPP_PACKAGE/.PlaymusicActivity &
sleep $(($SLEEP + 1 ))
#kill test case progress
adb shell am force-stop $WEBAPP_PACKAGE
| yugang/crosswalk-test-suite | stability/wrt-stablonglast-android-tests/stablonglast/RunPlayMusicLongTime.sh | Shell | bsd-3-clause | 1,885 |
#!/bin/sh
if [ $# -ne 2 ]; then
echo "Usage: $0 iface newmac"
echo " newmac is only saved if /etc/<iface>MAC is not found"
exit 1
fi
iface="$1"
newmac="$2"
macfile=/etc/"$iface"mac
# If no MAC is found, save the one given as argument
if [ ! -e $macfile ]; then
/bin/echo "$newmac" > $macfile
# Otherwise load MAC from file
else
newmac=`/bin/cat $macfile`
fi
# Configure interface
/sbin/ifconfig "$iface" down
/sbin/ifconfig "$iface" hw ether $newmac
| openembedded/openembedded | recipes/udev/udev/nokia900/nokia-n900-mac-hack.sh | Shell | mit | 469 |
#!/bin/sh
# AIK-Linux/unpackimg: split image and unpack ramdisk
# osm0sis @ xda-developers
cleanup() { $sudo$rmsu rm -rf ramdisk split_img *new.*; }
abort() { cd "$aik"; echo "Error!"; }
case $1 in
--sudo) sudo=sudo; sumsg=" (as root)"; shift;;
esac;
aik="${BASH_SOURCE:-$0}";
aik="$(dirname "$(readlink -f "$aik")")";
cd "$aik";
chmod -R 755 bin *.sh;
chmod 644 bin/magic;
arch=`uname -m`;
img="$1";
if [ ! "$img" ]; then
for i in *.img; do
test "$i" = "image-new.img" && continue;
img="$i"; break;
done;
fi;
if [ ! -f "$img" ]; then
echo "No image file supplied.";
abort;
exit 1;
fi;
clear;
echo " ";
echo "Android Image Kitchen - UnpackImg Script";
echo "by osm0sis @ xda-developers";
echo " ";
file=$(basename "$img");
echo "Supplied image: $file";
echo " ";
if [ -d split_img -o -d ramdisk ]; then
if [ ! -z "$(ls ramdisk/* 2> /dev/null)" ] && [ "$(stat -c %U ramdisk/* | head -n 1)" = "root" ]; then
test ! "$sudo" && rmsu=sudo; rmsumsg=" (as root)";
fi;
echo "Removing old work folders and files$rmsumsg...";
echo " ";
cleanup;
fi;
echo "Setting up work folders...";
echo " ";
mkdir split_img ramdisk;
echo 'Splitting image to "split_img/"...';
bin/$arch/unpackbootimg -i "$img" -o split_img;
if [ ! $? -eq "0" ]; then
cleanup;
abort;
exit 1;
fi;
cd split_img;
file -m ../bin/magic *-ramdisk.gz | cut -d: -f2 | awk '{ print $1 }' > "$file-ramdiskcomp";
ramdiskcomp=`cat *-ramdiskcomp`;
unpackcmd="$ramdiskcomp -dc";
compext=$ramdiskcomp;
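# Map the detected compressor to its conventional file extension; lz4 is unpacked with the bundled binary instead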
case $ramdiskcomp in
gzip) compext=gz;;
lzop) compext=lzo;;
xz) ;;
lzma) ;;
bzip2) compext=bz2;;
lz4) unpackcmd="../bin/$arch/lz4 -dq"; extra="stdout";;
*) compext="";;
esac;
if [ "$compext" ]; then
compext=.$compext;
fi;
mv "$file-ramdisk.gz" "$file-ramdisk.cpio$compext";
cd ..;
echo " ";
echo "Unpacking ramdisk$sumsg to \"ramdisk/\"...";
echo " ";
cd ramdisk;
echo "Compression used: $ramdiskcomp";
if [ ! "$compext" ]; then
abort;
exit 1;
fi;
$unpackcmd "../split_img/$file-ramdisk.cpio$compext" $extra | $sudo cpio -i;
if [ ! $? -eq "0" ]; then
abort;
exit 1;
fi;
cd ..;
echo " ";
echo "Done!";
exit 0;
| morogoku/MoRoKernel-S7-v2 | ramdisk/unpackimg.sh | Shell | gpl-2.0 | 2,133 |
#!/bin/sh
. "${TEST_SCRIPTS_DIR}/unit.sh"
define_test "3 nodes, no IPs assigned, all healthy, all in STARTUP runstate"
export CTDB_TEST_LOGLEVEL=2
required_result <<EOF
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.21.254
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.21.253
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.21.252
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.20.254
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.20.253
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.20.252
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.20.251
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.20.250
${TEST_DATE_STAMP}Failed to find node to cover ip 192.168.20.249
192.168.21.254 -1
192.168.21.253 -1
192.168.21.252 -1
192.168.20.254 -1
192.168.20.253 -1
192.168.20.252 -1
192.168.20.251 -1
192.168.20.250 -1
192.168.20.249 -1
EOF
export CTDB_TEST_RUNSTATE=4,4,4
simple_test 0,0,0 <<EOF
192.168.21.254 -1
192.168.21.253 -1
192.168.21.252 -1
192.168.20.254 -1
192.168.20.253 -1
192.168.20.252 -1
192.168.20.251 -1
192.168.20.250 -1
192.168.20.249 -1
EOF
| SpectraLogic/samba | ctdb/tests/takeover/lcp2.024.sh | Shell | gpl-3.0 | 1,148 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd)
CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
INPUT_IMG=$1
MODEL_DIR=$2
OUTPUT_DIR=$3
GPU=0
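# Run the BoostInference example on GPU 0 with a 1 GB heap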
java -Xmx1024m -cp $CLASS_PATH \
ml.dmlc.mxnetexamples.neuralstyle.end2end.BoostInference \
--model-path $MODEL_DIR \
--input-image $INPUT_IMG \
--output-path $OUTPUT_DIR \
--gpu $GPU
| ShownX/incubator-mxnet | scala-package/examples/scripts/neuralstyle_end2end/run_test_end2end.sh | Shell | apache-2.0 | 1,259 |
#!/bin/sh
#
# Check if current architecture are missing any function calls compared
# to i386.
# i386 define a number of legacy system calls that are i386 specific
# and listed below so they are ignored.
#
# Usage:
# syscallchk gcc gcc-options
#
ignore_list() {
cat << EOF
#include <asm/types.h>
#include <asm/unistd.h>
/* System calls for 32-bit kernels only */
#if BITS_PER_LONG == 64
#define __IGNORE_sendfile64
#define __IGNORE_ftruncate64
#define __IGNORE_truncate64
#define __IGNORE_stat64
#define __IGNORE_lstat64
#define __IGNORE_fstat64
#define __IGNORE_fcntl64
#define __IGNORE_fadvise64_64
#define __IGNORE_fstatat64
#define __IGNORE_fstatfs64
#define __IGNORE_statfs64
#endif
/* i386-specific or historical system calls */
#define __IGNORE_break
#define __IGNORE_stty
#define __IGNORE_gtty
#define __IGNORE_ftime
#define __IGNORE_prof
#define __IGNORE_lock
#define __IGNORE_mpx
#define __IGNORE_ulimit
#define __IGNORE_profil
#define __IGNORE_ioperm
#define __IGNORE_iopl
#define __IGNORE_idle
#define __IGNORE_modify_ldt
#define __IGNORE_ugetrlimit
#define __IGNORE_mmap2
#define __IGNORE_vm86
#define __IGNORE_vm86old
#define __IGNORE_set_thread_area
#define __IGNORE_get_thread_area
#define __IGNORE_madvise1
#define __IGNORE_oldstat
#define __IGNORE_oldfstat
#define __IGNORE_oldlstat
#define __IGNORE_oldolduname
#define __IGNORE_olduname
#define __IGNORE_umount2
#define __IGNORE_umount
#define __IGNORE_waitpid
#define __IGNORE_stime
#define __IGNORE_nice
#define __IGNORE_signal
#define __IGNORE_sigaction
#define __IGNORE_sgetmask
#define __IGNORE_sigsuspend
#define __IGNORE_sigpending
#define __IGNORE_ssetmask
#define __IGNORE_readdir
#define __IGNORE_socketcall
#define __IGNORE_ipc
#define __IGNORE_sigreturn
#define __IGNORE_sigprocmask
#define __IGNORE_bdflush
#define __IGNORE__llseek
#define __IGNORE__newselect
#define __IGNORE_create_module
#define __IGNORE_delete_module
#define __IGNORE_query_module
#define __IGNORE_get_kernel_syms
/* ... including the "new" 32-bit uid syscalls */
#define __IGNORE_lchown32
#define __IGNORE_getuid32
#define __IGNORE_getgid32
#define __IGNORE_geteuid32
#define __IGNORE_getegid32
#define __IGNORE_setreuid32
#define __IGNORE_setregid32
#define __IGNORE_getgroups32
#define __IGNORE_setgroups32
#define __IGNORE_fchown32
#define __IGNORE_setresuid32
#define __IGNORE_getresuid32
#define __IGNORE_setresgid32
#define __IGNORE_getresgid32
#define __IGNORE_chown32
#define __IGNORE_setuid32
#define __IGNORE_setgid32
#define __IGNORE_setfsuid32
#define __IGNORE_setfsgid32
/* sync_file_range had a stupid ABI. Allow sync_file_range2 instead */
#ifdef __NR_sync_file_range2
#define __IGNORE_sync_file_range
#endif
/* Unmerged syscalls for AFS, STREAMS, etc. */
#define __IGNORE_afs_syscall
#define __IGNORE_getpmsg
#define __IGNORE_putpmsg
#define __IGNORE_vserver
EOF
}
syscall_list() {
sed -n -e '/^\#define/ { s/[^_]*__NR_\([^[:space:]]*\).*/\
\#if !defined \(__NR_\1\) \&\& !defined \(__IGNORE_\1\)\
\#warning syscall \1 not implemented\
\#endif/p }' $1
}
(ignore_list && syscall_list ${srctree}/include/asm-x86/unistd_32.h) | \
$* -E -x c - > /dev/null
| blakearnold/MPR | scripts/checksyscalls.sh | Shell | gpl-2.0 | 3,129 |
#!/bin/bash
mkdir -p $PREFIX/bin
chmod +x proTRAC_$PKG_VERSION.pl
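# Convert CR line endings to LF and rewrite the shebang to use "env perl" so the script runs with whichever perl is on PATH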
perl -i -wpe 's/\r/\n/g' proTRAC_$PKG_VERSION.pl
perl -i -wpe 's/\/perl/\/env perl/' proTRAC_$PKG_VERSION.pl
cp proTRAC_$PKG_VERSION.pl $PREFIX/bin
| dmaticzka/bioconda-recipes | recipes/protrac/build.sh | Shell | mit | 216 |
#!/bin/bash
#
# SPDX-License-Identifier: GPL-2.0
# gen_kselftest_tar
# Generate kselftest tarball
# Author: Shuah Khan <[email protected]>
# Copyright (C) 2015 Samsung Electronics Co., Ltd.
# main
main()
{
if [ "$#" -eq 0 ]; then
echo "$0: Generating default compression gzip"
copts="cvzf"
ext=".tar.gz"
else
case "$1" in
tar)
copts="cvf"
ext=".tar"
;;
targz)
copts="cvzf"
ext=".tar.gz"
;;
tarbz2)
copts="cvjf"
ext=".tar.bz2"
;;
tarxz)
copts="cvJf"
ext=".tar.xz"
;;
*)
echo "Unknown tarball format $1"
exit 1
;;
esac
fi
# Create working directory.
dest=`pwd`
install_work="$dest"/kselftest_install
install_name=kselftest
install_dir="$install_work"/"$install_name"
mkdir -p "$install_dir"
# Run install using INSTALL_KSFT_PATH override to generate install
# directory
./kselftest_install.sh "$install_dir"
(cd "$install_work"; tar $copts "$dest"/kselftest${ext} $install_name)
echo "Kselftest archive kselftest${ext} created!"
# clean up top-level install work directory
rm -rf "$install_work"
}
main "$@"
| Pingmin/linux | tools/testing/selftests/gen_kselftest_tar.sh | Shell | gpl-2.0 | 1,114 |
#!/bin/sh
#
# Copyright (c) 2006 Eric Wong
#
test_description='git apply should not get confused with type changes.
'
. ./test-lib.sh
test_expect_success SYMLINKS 'setup repository and commits' '
echo "hello world" > foo &&
echo "hi planet" > bar &&
git update-index --add foo bar &&
git commit -m initial &&
git branch initial &&
rm -f foo &&
ln -s bar foo &&
git update-index foo &&
git commit -m "foo symlinked to bar" &&
git branch foo-symlinked-to-bar &&
rm -f foo &&
echo "how far is the sun?" > foo &&
git update-index foo &&
git commit -m "foo back to file" &&
git branch foo-back-to-file &&
printf "\0" > foo &&
git update-index foo &&
git commit -m "foo becomes binary" &&
git branch foo-becomes-binary &&
rm -f foo &&
git update-index --remove foo &&
mkdir foo &&
echo "if only I knew" > foo/baz &&
git update-index --add foo/baz &&
git commit -m "foo becomes a directory" &&
git branch "foo-becomes-a-directory" &&
echo "hello world" > foo/baz &&
git update-index foo/baz &&
git commit -m "foo/baz is the original foo" &&
git branch foo-baz-renamed-from-foo
'
test_expect_success SYMLINKS 'file renamed from foo to foo/baz' '
git checkout -f initial &&
git diff-tree -M -p HEAD foo-baz-renamed-from-foo > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'file renamed from foo/baz to foo' '
git checkout -f foo-baz-renamed-from-foo &&
git diff-tree -M -p HEAD initial > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'directory becomes file' '
git checkout -f foo-becomes-a-directory &&
git diff-tree -p HEAD initial > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'file becomes directory' '
git checkout -f initial &&
git diff-tree -p HEAD foo-becomes-a-directory > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'file becomes symlink' '
git checkout -f initial &&
git diff-tree -p HEAD foo-symlinked-to-bar > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'symlink becomes file' '
git checkout -f foo-symlinked-to-bar &&
git diff-tree -p HEAD foo-back-to-file > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'binary file becomes symlink' '
git checkout -f foo-becomes-binary &&
git diff-tree -p --binary HEAD foo-symlinked-to-bar > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'symlink becomes binary file' '
git checkout -f foo-symlinked-to-bar &&
git diff-tree -p --binary HEAD foo-becomes-binary > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'symlink becomes directory' '
git checkout -f foo-symlinked-to-bar &&
git diff-tree -p HEAD foo-becomes-a-directory > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_expect_success SYMLINKS 'directory becomes symlink' '
git checkout -f foo-becomes-a-directory &&
git diff-tree -p HEAD foo-symlinked-to-bar > patch &&
git apply --index < patch
'
test_debug 'cat patch'
test_done
| TextusData/Mover | thirdparty/git-1.7.11.3/t/t4114-apply-typechange.sh | Shell | gpl-3.0 | 3,189 |
echo "which blog repo (subtree)? "
read repo
git subtree push --prefix=contents/article/$repo $repo master
| maps-on-blackboard/maps-on-blackboard-wintersmith | push-subtree-blog-repo.sh | Shell | mit | 108 |
#!/bin/bash
LOTE=$1
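# BINDIR and NOMBRE are expected to be exported by the caller; run simusched on the given task list (LOTE) with the SchedRR scheduler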
$BINDIR/simusched $LOTE 1 1 0 SchedRR 5 > "$NOMBRE.dat"
| lvuotto/so-tps | tp1/benchmark/ej4-1.sh | Shell | mit | 77 |
#!/bin/bash
DB_PARAMS=~/RandomMetroidSolver/db_params.py
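# Helpers: read a quoted connection setting out of db_params.py, and query/update seed upload status in MySQL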
function get_db_param {
PARAM="$1"
sed -e "s+.*${PARAM}='\([^']*\)'.*+\1+" ${DB_PARAMS}
}
function get_pending_seeds {
SQL="select guid from randomizer where upload_status = 'pending';"
echo "${SQL}" | mysql --skip-column-names --silent -h ${host} -u ${user} -p${password} ${database}
}
function update_seed_status {
local KEY="${1}"
local STATUS="${2}"
SQL="update randomizer set upload_status = '${STATUS}' where guid = '${KEY}';"
echo "${SQL}" | mysql --skip-column-names --silent -h ${host} -u ${user} -p${password} ${database}
}
cd ~/varia_repository
# get list of pending upload seeds
export host=$(get_db_param "host")
export user=$(get_db_param "user")
export database=$(get_db_param "database")
export password=$(get_db_param "password")
KEYS=$(get_pending_seeds)
# add them to git
for KEY in ${KEYS}; do
git add ${KEY}
update_seed_status "${KEY}" "uploaded"
done
git commit -m "daily commit" .
git push
# delete non git older than retention
RETENTION_DAYS=7
git status . | grep -E '[0-9a-z\-]+/' | while read KEY; do
if [ -n "$(find "${KEY}" -mtime +${RETENTION_DAYS})" ]; then
echo "delete ${KEY}"
rm -rf "${KEY}"
KEY=$(echo "${KEY}" | sed -e 's+/$++')
update_seed_status "${KEY}" "deleted"
fi
done
| theonlydude/RandomMetroidSolver | tools/archive_ips.sh | Shell | mit | 1,357 |
#!/bin/bash
KEYWORDS_BIBLE="Bible|biblical|Adam(| )(&|and)(| )Eve|Book(| )of(| )(Genesis|Proverbs)|\bEden\b|Isaiah|Israelites|Goliath|Philistine|Tribe(|s)(| )of(| )Judah|Leviticus|Deuteronomy|King(| )James(| )Version"
KEYWORDS_BIBLE_CASESENSITIVE="\bJob\b"
KEYWORDS_BIBLE_EXCLUDE="Goliath(| )(beetle|chronicle)"
KEYWORDS_BIBLE_ALL="$KEYWORDS_BIBLE|$KEYWORDS_BIBLE_CASESENSITIVE"
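# Match new pages against the Bible keyword lists (case-insensitive plus case-sensitive), drop the exclusions, then categorize the hits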
if [ "$1" == "" ];
then
debug_start "The Bible"
BIBLE=$(egrep -i "$KEYWORDS_BIBLE" "$NEWPAGES"| egrep -iv "$KEYWORDS_BIBLE_EXCLUDE"; egrep "$KEYWORDS_BIBLE_CASESENSITIVE" "$NEWPAGES" | egrep -iv "$KEYWORDS_BIBLE_EXCLUDE")
categorize "BIBLE" "The Bible"
debug_end "The Bible"
fi
| MW-autocat-script/MW-autocat-script | catscripts/Lifestyle/Religion/The_Bible/TheBible.sh | Shell | mit | 673 |
#!/bin/bash
DB=${1:-test_badchar}
echo "========================================"
echo "Start: `date`"
START="`date +%s`"
echo "========================================"
TAB_COL_ALL=$( echo "select search_for_non_utf8_columns();" | psql ${DB} | perl -ne 'print STDERR $_; if (/^ \(/ ) { s/[(),]/ /g; @x = split(/\s+/,$_); print "'\''$x[2]'\'','\''$x[3]'\''\n";}' )
echo "$TAB_COL_ALL"
echo "========================================"
echo "Initial search completed: `date`"
SEARCHDONE="`date +%s`"
echo "Search run time $(( $SEARCHDONE - $START )) seconds"
echo "========================================"
for TAB_COL in $TAB_COL_ALL; do
echo "processing: $TAB_COL"
psql -e ${DB} 2>&1 <<EOF | grep -v -E 'CONTEXT:|PL/pgSQL'
SET session_replication_role = replica;
SELECT process_non_utf8_at_column($TAB_COL);
SET session_replication_role = DEFAULT;
EOF
done
echo "========================================"
echo "Update complete: `date`"
UPDATED="`date +%s`"
echo "Update run time $(( $UPDATED - $SEARCHDONE )) seconds"
echo "========================================"
echo "========= checking results ============="
echo "select search_for_non_utf8_columns(show_timestamps := false);" | psql ${DB}
echo "========================================"
echo "Finish: `date`"
FINISH="`date +%s`"
echo "Total run time $(( $FINISH - $START )) seconds"
echo "========================================"
| sammcj/sql_ascii_to_utf8 | run_process_non_utf8.sh | Shell | mit | 1,404 |
read a;read b;MSG="You can win";for i in `seq 1 ${#b}`;do m=`echo "$a"| cut -c $i`;n=`echo "$b"| cut -c $i`;[ ! "$m" = "$n" ] && { [[ "$m" =~ [^atcoder@] ]] || [[ "$n" =~ [^atcoder@] ]] && MSG="You will lose" && break;[ ! "$m" = "@" ] && [[ "$n" =~ [atcoder] ]] && MSG="You will lose" && break;[ ! "$n" = "@" ] && [[ "$m" =~ [atcoder] ]] && MSG="You will lose" && break;};done;echo $MSG
| jmatsu/BashCoder | abc/003/b.sh | Shell | mit | 386 |
# Disable press-and-hold for keys in favor of key repeat.
defaults write -g ApplePressAndHoldEnabled -bool false
# Use AirDrop over every interface. srsly this should be a default.
defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1
# Always open everything in Finder's list view. This is important.
defaults write com.apple.Finder FXPreferredViewStyle Nlsv
# Show the ~/Library folder.
chflags nohidden ~/Library
# Set a really fast key repeat.
defaults write NSGlobalDomain KeyRepeat -int 0
# Set the Finder prefs for showing a few different volumes on the Desktop.
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
# Hide Safari's bookmark bar.
defaults write com.apple.Safari ShowFavoritesBar -bool false
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Automatically quit printer app once the print jobs complete
defaults write com.apple.print.PrintingPrefs "Quit When Finished" -bool true
# Disable the crash reporter
defaults write com.apple.CrashReporter DialogType -string "none"
# Restart automatically if the computer freezes
sudo systemsetup -setrestartfreeze on
# Check for software updates daily, not just once per week
defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1
# Disable the sudden motion sensor as it’s not useful for SSDs
sudo pmset -a sms 0
# Trackpad: map bottom right corner to right-click
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadCornerSecondaryClick -int 2
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadRightClick -bool true
defaults -currentHost write NSGlobalDomain com.apple.trackpad.trackpadCornerClickBehavior -int 1
defaults -currentHost write NSGlobalDomain com.apple.trackpad.enableSecondaryClick -bool true
# Set the timezone; see `sudo systemsetup -listtimezones` for other values
sudo systemsetup -settimezone "Europe/Dublin" > /dev/null
# Save screenshots to the desktop
defaults write com.apple.screencapture location -string "${HOME}/Desktop"
# Save screenshots in PNG format (other options: BMP, GIF, JPG, PDF, TIFF)
defaults write com.apple.screencapture type -string "png"
# Finder: disable window animations and Get Info animations
defaults write com.apple.finder DisableAllAnimations -bool true
# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Disable disk image verification
defaults write com.apple.frameworks.diskimages skip-verify -bool true
defaults write com.apple.frameworks.diskimages skip-verify-locked -bool true
defaults write com.apple.frameworks.diskimages skip-verify-remote -bool true
# Empty Trash securely by default
defaults write com.apple.finder EmptyTrashSecurely -bool true
# Don’t animate opening applications from the Dock
defaults write com.apple.dock launchanim -bool false
# Enable the WebKit Developer Tools in the Mac App Store
defaults write com.apple.appstore WebKitDeveloperExtras -bool true
# Disable the all too sensitive backswipe
defaults write com.google.Chrome AppleEnableSwipeNavigateWithScrolls -bool false
defaults write com.google.Chrome.canary AppleEnableSwipeNavigateWithScrolls -bool false
|
zeroDivisible/new-machine-bootstrap
|
scripts/osx-defaults.sh
|
Shell
|
mit
| 3,386 |
#!/bin/bash
systemctl restart outages.weenhanceit.com
|
weenhanceit/outages
|
scripts/ApplicationStart.sh
|
Shell
|
mit
| 54 |
#!/bin/bash
ln -s -r $1 enabled_modules/
|
JimboMonkey1234/pushserver
|
enable.sh
|
Shell
|
mit
| 42 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3335-1
#
# Security announcement date: 2015-08-13 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:31 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: armv7l
#
# Vulnerable packages fixed in version:
# - request-tracker4:4.0.7-5+deb7u4
# - rt4-clients:4.0.7-5+deb7u4
# - rt4-fcgi:4.0.7-5+deb7u4
# - rt4-apache2:4.0.7-5+deb7u4
# - rt4-db-postgresql:4.0.7-5+deb7u4
# - rt4-db-mysql:4.0.7-5+deb7u4
# - rt4-db-sqlite:4.0.7-5+deb7u4
#
# Last versions recommended by security team:
# - request-tracker4:4.0.7-5+deb7u4
# - rt4-clients:4.0.7-5+deb7u4
# - rt4-fcgi:4.0.7-5+deb7u4
# - rt4-apache2:4.0.7-5+deb7u4
# - rt4-db-postgresql:4.0.7-5+deb7u4
# - rt4-db-mysql:4.0.7-5+deb7u4
# - rt4-db-sqlite:4.0.7-5+deb7u4
#
# CVE List:
# - CVE-2015-5475
# - CVE-2015-6506
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade request-tracker4=4.0.7-5+deb7u4 -y
sudo apt-get install --only-upgrade rt4-clients=4.0.7-5+deb7u4 -y
sudo apt-get install --only-upgrade rt4-fcgi=4.0.7-5+deb7u4 -y
sudo apt-get install --only-upgrade rt4-apache2=4.0.7-5+deb7u4 -y
sudo apt-get install --only-upgrade rt4-db-postgresql=4.0.7-5+deb7u4 -y
sudo apt-get install --only-upgrade rt4-db-mysql=4.0.7-5+deb7u4 -y
sudo apt-get install --only-upgrade rt4-db-sqlite=4.0.7-5+deb7u4 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/armv7l/2015/DSA-3335-1.sh
|
Shell
|
mit
| 1,471 |
#!/usr/bin/env bash
usage(){
echo
echo " Usage: fsplate <template> [destination]"
echo
echo " Options:"
echo
echo " -f, --force write over any existing files"
echo " -h, --help show this help message"
echo " -V, --version print version number"
echo
echo " Note: you will be prompted for any values not in config"
echo " and no files will be overwritten without your confirmation"
echo
echo " Examples: "
echo
echo " # simple one file"
echo " \$ fsplate Makefile"
echo
echo " # target a sub-directory"
echo " \$ fsplate component myproject"
echo
}
parse_vars(){
cat | grep -o {{[^}]*}} | sed -e 's/^{{//' -e 's/}}$//'
}
substitute(){
local str=$(cat)
for var in $(echo $str | grep -o {{[^}]*}}); do
key="${var:2:-2}"
str="$(echo "$str" | sed "s/$var/${dict[$key]}/")"
done
echo "$str"
}
copyFile(){
mkdir -p "$(dirname $2)"
if [[ -e "$2" && ! $force ]]; then
read -p "${2/$HOME/~} exists. (y=overwrite, n=skip, r=rename): " -n 1
[[ "$REPLY" ]] && echo >&2
case $REPLY in
y|Y)
echo "overwriting ${2/$HOME/~}"
;;
n|N)
echo "skipping ${2/$HOME/~}" >&2
return 0
;;
r|R)
read -p "Enter alternative name for '$(basename $2)': "
[[ "$REPLY" ]] && echo >&2
copyFile "$1" "$(dirname $2)/${REPLY:-$(basename $2)}"
return $?
;;
*)
echo "Try again" >&2
copyFile "$1" "$2"
return $?
;;
esac
fi
substitute < "$1" > "$2"
}
for arg in $@; do
case $arg in
-h|--help)
usage
exit 1
;;
-f|--force)
force=true
shift
;;
-V|--version) echo "0.0.1"; exit 0;;
-*)
echo "unknown option '$arg'" >&2
usage
exit 1
;;
esac
done
[[ ! "$1" ]] && usage && exit 1
# load data
ifs=$IFS
IFS=$'\n'
declare -A dict
for line in $(egrep -o ".+: .+" < "$HOME/.fsplates/.config"); do
key=${line%%": "*}
val=${line#*": "*}
dict[$key]=$val
done
IFS="$ifs"
template="$(realpath -es ${HOME}/.fsplates/$1)"
destination="$(realpath ${2:-$PWD})"
if [[ -d "$template" ]]; then
base="$template"
else
base="$(dirname $template)"
fi
for file in $(find -L $template -type f); do
name="$destination${file#$base}"
vars="$(parse_vars < $file) $(echo $name | parse_vars)"
# prompt user for all undefined vars
for var in $vars; do
if [[ ! ${dict[$var]+_} ]]; then
read -p "$var: "
dict[$var]="$REPLY"
fi
done
copyFile "$file" "$(echo $name | substitute)"
done
|
jkroso/fs-plates
|
fsplates.sh
|
Shell
|
mit
| 2,584 |
#!/bin/bash
#xhost + local:
#ARDU_PATH=`cd ../../ardupilot && pwd`
#echo $ARDU_PATH
#docker run -it --rm --name sitl_run -v $ARDU_PATH:/ardupilot -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY sitl_image /bin/bash
DRONE_LAB_DIR=`cd ../../ && pwd`
docker run -it --rm \
-v $DRONE_LAB_DIR:/dronelab \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v $DRONE_LAB_DIR/dockers/docker_home:/home/docker \
-e DISPLAY=$DISPLAY \
-e USERNAME=docker \
-e USER=docker \
-e SITL_POSITION_PORT=19988 \
-e HOME=/home/docker \
-u $UID \
sitl_image "/bin/bash"
#cd /dronelab/ardupilot/ArduCopter/ && ../Tools/autotest/sim_vehicle.py -w
|
orig74/DroneSimLab
|
dockers/sitl_image/run_image.sh
|
Shell
|
mit
| 618 |
#!/bin/bash
# @TODO Create symlinks or shortcuts to library files (Research what gets auto-included by composer and what doesn't
XDIR=$(pwd)
echo "Currenty @ ${XDIR}..."
# Create symlinks
ln -s
|
uchilaka/com.larcity.codeigniter.shell
|
dist/build.sh
|
Shell
|
mit
| 196 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2351-1
#
# Security announcement date: 2011-11-21 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:19 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - wireshark:1.2.11-6+squeeze5
#
# Last versions recommended by security team:
# - wireshark:1.8.2-5wheezy16~deb6u1
#
# CVE List:
# - CVE-2011-4102
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade wireshark=1.8.2-5wheezy16~deb6u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2011/DSA-2351-1.sh
|
Shell
|
mit
| 640 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2249-1
#
# Security announcement date: 2011-05-31 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:15 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - jabberd14:1.6.1.1-5+squeeze1
#
# Last versions recommended by security team:
# - jabberd14:1.6.1.1-5+squeeze1
#
# CVE List:
# - CVE-2011-1754
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade jabberd14=1.6.1.1-5+squeeze1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/i386/2011/DSA-2249-1.sh
|
Shell
|
mit
| 631 |
#!/usr/bin/env bash
echo "$OSTYPE"
# error_exit is called below but was never defined; a minimal definition (assumed behavior):
error_exit() {
  echo "$1" >&2
  exit 1
}
if [[ "$OSTYPE" == 'msys' ]]; then
echo 'OS is Windows. Setting npm script-shell to bash'
if test -f 'C:/Program Files/git/bin/bash.exe'; then
npm config set script-shell 'C:/Program Files/git/bin/bash.exe'
echo 'script-shell set to C:/Program Files/git/bin/bash.exe'
elif test -f 'C:/Program Files (x86)/git/bin/bash.exe'; then
npm config set script-shell 'C:/Program Files (x86)/git/bin/bash.exe'
echo 'script-shell set to C:/Program Files (x86)/git/bin/bash.exe'
else
error_exit 'git is not installed!'
fi
fi
if test -f '.gitignore'; then
git mv .gitignore .gitignore.back
npm run build
git add -- ./lib
git commit -m 'Add built files' -- ./lib
git mv .gitignore.back .gitignore
cd ./lib
git ls-files -z | xargs -0 git update-index --assume-unchanged
else
error_exit '.gitignore does not exist!'
fi
|
JunoLab/atom-indent-detective
|
ppublish.sh
|
Shell
|
mit
| 859 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2015:0301
#
# Security announcement date: 2015-03-05 14:52:45 UTC
# Script generation date: 2017-01-01 21:15:59 UTC
#
# Operating System: Red Hat 7
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - hivex.i686:1.3.10-5.7.el7
# - hivex.x86_64:1.3.10-5.7.el7
# - hivex-debuginfo.i686:1.3.10-5.7.el7
# - hivex-debuginfo.x86_64:1.3.10-5.7.el7
# - perl-hivex.x86_64:1.3.10-5.7.el7
# - hivex-devel.i686:1.3.10-5.7.el7
# - hivex-devel.x86_64:1.3.10-5.7.el7
# - ocaml-hivex.x86_64:1.3.10-5.7.el7
# - ocaml-hivex-devel.x86_64:1.3.10-5.7.el7
# - python-hivex.x86_64:1.3.10-5.7.el7
# - ruby-hivex.x86_64:1.3.10-5.7.el7
#
# Last versions recommended by security team:
# - hivex.i686:1.3.10-5.7.el7
# - hivex.x86_64:1.3.10-5.7.el7
# - hivex-debuginfo.i686:1.3.10-5.7.el7
# - hivex-debuginfo.x86_64:1.3.10-5.7.el7
# - perl-hivex.x86_64:1.3.10-5.7.el7
# - hivex-devel.i686:1.3.10-5.7.el7
# - hivex-devel.x86_64:1.3.10-5.7.el7
# - ocaml-hivex.x86_64:1.3.10-5.7.el7
# - ocaml-hivex-devel.x86_64:1.3.10-5.7.el7
# - python-hivex.x86_64:1.3.10-5.7.el7
# - ruby-hivex.x86_64:1.3.10-5.7.el7
#
# CVE List:
# - CVE-2014-9273
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install hivex.i686-1.3.10 -y
sudo yum install hivex.x86_64-1.3.10 -y
sudo yum install hivex-debuginfo.i686-1.3.10 -y
sudo yum install hivex-debuginfo.x86_64-1.3.10 -y
sudo yum install perl-hivex.x86_64-1.3.10 -y
sudo yum install hivex-devel.i686-1.3.10 -y
sudo yum install hivex-devel.x86_64-1.3.10 -y
sudo yum install ocaml-hivex.x86_64-1.3.10 -y
sudo yum install ocaml-hivex-devel.x86_64-1.3.10 -y
sudo yum install python-hivex.x86_64-1.3.10 -y
sudo yum install ruby-hivex.x86_64-1.3.10 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_7/x86_64/2015/RHSA-2015:0301.sh
|
Shell
|
mit
| 1,872 |
#!/bin/bash
set -e -o pipefail
if [ ! -f "/volumes/dropbox/.dropbox_uploader" ]; then
echo "[ERROR] Missing config file for Dropbox Uploader, see README."
exit 1
fi
tar -czf "/root/world.tar.gz" \
--exclude='*.lock' \
--exclude='.git' \
"game/world" \
"game/world_nether" \
"game/world_the_end" \
"game/spigot" \
"game/settings-custom"
dropbox_uploader.sh -p -h -f "/volumes/dropbox/.dropbox_uploader" \
upload "/root/world.tar.gz" "world.tar.gz"
|
themattrix/minecraft-fig
|
containers/dropbox-save-world/save-to-dropbox.sh
|
Shell
|
mit
| 491 |
#!/bin/sh
set -e
: ${MNEMOSYNED_PORT:=8080}
: ${MNEMOSYNED_HOST:=0.0.0.0}
: ${MNEMOSYNED_GRPC_DEBUG:=false}
: ${MNEMOSYNED_TTL:=24m}
: ${MNEMOSYNED_TTC:=1m}
: ${MNEMOSYNED_CLUSTER_LISTEN:=$(hostname):$MNEMOSYNED_PORT}
: ${MNEMOSYNED_CLUSTER_SEEDS:=}
: ${MNEMOSYNED_LOG_ENVIRONMENT:=production}
: ${MNEMOSYNED_LOG_LEVEL:=info}
: ${MNEMOSYNED_STORAGE:=postgres}
: ${MNEMOSYNED_POSTGRES_ADDRESS:=postgres://postgres:postgres@postgres/postgres?sslmode=disable}
: ${MNEMOSYNED_POSTGRES_TABLE:=session}
: ${MNEMOSYNED_POSTGRES_SCHEMA:=mnemosyne}
: ${MNEMOSYNED_POSTGRES_DEBUG:=false}
: ${MNEMOSYNED_TLS_ENABLED:=false}
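# If the container command is "mnemosyned", expand it into a fully parameterised
# invocation; any other command falls through to the `exec "$@"` at the bottom.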
if [ "$1" = 'mnemosyned' ]; then
exec mnemosyned -host=${MNEMOSYNED_HOST} \
-port=${MNEMOSYNED_PORT} \
-grpc.debug=${MNEMOSYNED_GRPC_DEBUG} \
-ttl=${MNEMOSYNED_TTL} \
-ttc=${MNEMOSYNED_TTC} \
-cluster.listen=${MNEMOSYNED_CLUSTER_LISTEN} \
-cluster.seeds=${MNEMOSYNED_CLUSTER_SEEDS} \
-storage=${MNEMOSYNED_STORAGE} \
-log.environment=${MNEMOSYNED_LOG_ENVIRONMENT} \
-log.level=${MNEMOSYNED_LOG_LEVEL} \
-postgres.address=${MNEMOSYNED_POSTGRES_ADDRESS} \
-postgres.table=${MNEMOSYNED_POSTGRES_TABLE} \
-postgres.schema=${MNEMOSYNED_POSTGRES_SCHEMA} \
-tls=${MNEMOSYNED_TLS_ENABLED} \
-tls.crt=${MNEMOSYNED_TLS_CRT} \
-tls.key=${MNEMOSYNED_TLS_KEY} \
-tracing.agent.address=${MNEMOSYNED_TRACING_AGENT_ADDRESS}
fi
exec "$@"
|
piotrkowalczuk/mnemosyne
|
scripts/docker-entrypoint.sh
|
Shell
|
mit
| 1,352 |
#!/bin/bash
docker build --no-cache -t glot/coffeescript:latest .
|
kurtinge/glot-containers
|
coffeescript/build.sh
|
Shell
|
mit
| 66 |
#!/usr/bin/env sh
isort **/*.py && black -l 79 **/*.py
mypy **/*.py --ignore-missing-imports
pyflakes **/*.py
PYTHONPATH=. python -m pytest tests/
|
cptx032/calcium
|
runtests.sh
|
Shell
|
mit
| 147 |
dcheck git || return
alias wdiff="git --no-pager diff --color=auto --no-ext-diff --no-index --color-words '--word-diff-regex=[a-zA-Z0-9\-_]+'"
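# Usage sketch (hypothetical filenames): word-level diff of two arbitrary files,
# not necessarily inside a repository (enabled by --no-index above):
#   wdiff old.txt new.txt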
|
justinhoward/dotfiles
|
modules/wdiff/init.sh
|
Shell
|
mit
| 144 |
#!/bin/bash
root="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
source "$LSN_COMMON/functions"
cd $root
git_repo="https://github.com/balupton/history.js.git"
dir_proj="/home/ryan/Projects/history.js"
dir_dest="$root/src/lib/ecma/platform"
#
# Update sources
#
if [ ! -d "$dir_proj" ]; then
if ($(ask_yn "Create: $dir_proj")); then
mkdir -p $dir_proj
pdir=$(dir_absolute "$dir_proj/../")
name=$(basename "$dir_proj")
echo "Pdir $pdir"
cd $pdir
git clone "$git_repo" "$name"
cd $dir_proj
else
exit 0
fi
else
cd $dir_proj
# git submodule update --init --recursive
git pull
fi
#
# Copy to project space
#
mkdir -p $dir_dest
cp scripts/bundled/html4+html5/native.history.js $dir_dest/
|
ryangies/lsn-javascript
|
scripts/up-history-js.sh
|
Shell
|
mit
| 736 |
#
# npm command completion script
#
# Install Method 1: Automatic
# Put this file in /etc/bash-completion.d or /usr/local/etc/bash-completion.d
# or wherever bash-completion scripts are sourced on your system.
#
# Install Method 2: Generic
# Put this in .bashrc or whatever file you run when you log into a machine:
# . path/to/npm-completion.sh
#
# Then use the tab key, which executes the "npm completion" command.
#
# Special thanks to Evan Meagher for making the npm completion command
# much more useful and complete.
COMP_WORDBREAKS=${COMP_WORDBREAKS/=/}
COMP_WORDBREAKS=${COMP_WORDBREAKS/@/}
export COMP_WORDBREAKS
__npm_completion () {
COMPREPLY=()
local cur prev opts logfile
if [ "${loglevel:-silent}" == "silent" ]; then
logfile=/dev/null
else
logfile=/tmp/npm-completion.log
fi
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
# opts=$(npm complete --loglevel silent --color false -- "$cur")
COMPREPLY=( $(COMP_CWORD=$COMP_CWORD \
COMP_LINE=$COMP_LINE \
COMP_POINT=$COMP_POINT \
COMP_WORDBREAKS=$COMP_WORDBREAKS \
COMP_WORDS="${COMP_WORDS[@]}" \
npm completion --color false --loglevel "${loglevel:-silent}" \
-- "${COMP_WORDS[@]}" \
2>>$logfile ) )
return $?
}
complete -o default -F __npm_completion npm
|
BogusCurry/npm
|
npm-completion.sh
|
Shell
|
mit
| 1,378 |
#!/bin/sh
SALT='$murphyseanmovienight$:'
PASSWORD=password
export SALTED_PASSWORD=$SALT$PASSWORD
#Mac version uses shasum, linux could use sha512sum
#Not working for some reason, in this script it returns different results than shell
HASHED_PASSWORD=`echo -n $SALTED_PASSWORD | shasum -a 512256 | xxd -r -p | base64`
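# A possible culprit (assumption, untested): shasum prints "<hex>  -", and the
# trailing "-" can confuse xxd; keeping only the hex field may fix it, e.g.:
# HASHED_PASSWORD=$(printf '%s' "$SALTED_PASSWORD" | shasum -a 512256 | awk '{print $1}' | xxd -r -p | base64)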
HASHED_PASSWORD='ShWHajVZLQ14puMoUlSrJUvzhpp+8WK7ElsPCqmMsFA='
echo $HASHED_PASSWORD
echo "Creating Users..."
for i in `seq 1 10`;
do
sqlite3 mn.db "INSERT INTO users (id,name,email,password) VALUES ($i,'User $i', '[email protected]', '$HASHED_PASSWORD')"
done
echo "Users Created"
echo "Creating Movies..."
curl 'http://localhost:9000/admin/movie?imdb=tt1446714'
curl 'http://localhost:9000/admin/movie?imdb=tt0120902'
curl 'http://localhost:9000/admin/movie?imdb=tt1823672'
curl 'http://localhost:9000/admin/movie?imdb=tt0470752'
echo "Movies Created"
echo "Creating Showtimes..."
sqlite3 mn.db "INSERT INTO showtimes (movieid,showtime,screen) VALUES ('1446714',substr('`date -u -v+tue -v18H -v0M -v0S '+%Y-%m-%d %H:%M:%S%z'`',1,22) || ':00','2D')"
sqlite3 mn.db "INSERT INTO showtimes (movieid,showtime,screen) VALUES ('0120902',substr('`date -u -v+tue -v19H -v0M -v0S '+%Y-%m-%d %H:%M:%S%z'`',1,22) || ':00','2D')"
sqlite3 mn.db "INSERT INTO showtimes (movieid,showtime,screen) VALUES ('1823672',substr('`date -u -v+tue -v20H -v0M -v0S '+%Y-%m-%d %H:%M:%S%z'`',1,22) || ':00','2D')"
sqlite3 mn.db "INSERT INTO showtimes (movieid,showtime,screen) VALUES ('0470752',substr('`date -u -v+tue -v21H -v0M -v0S '+%Y-%m-%d %H:%M:%S%z'`',1,22) || ':00','2D')"
echo "Showtimes Created"
echo "Casting Votes..."
echo "Votes Cast"
|
murphysean/movie-night
|
scripts/populate.sh
|
Shell
|
mit
| 1,649 |
# If set, parameter expansion, command substitution and arithmetic expansion are
# performed in prompts. Substitutions within prompts do not affect the command
# status.
#
# http://zsh.sourceforge.net/Doc/Release/Options.html#index-PROMPTSUBST
#
setopt PROMPT_SUBST
# If the PROMPT_SUBST option is set, the prompt string is first subjected to
# parameter expansion, command substitution and arithmetic expansion.
#
# %n $USERNAME.
#
# %M The full machine hostname.
#
# %~ As %d and %/, but if the current working directory starts with $HOME, that
# part is replaced by a '~'. Furthermore, if it has a named directory as its
# prefix, that part is replaced by a '~' followed by the name of the
# directory, but only if the result is shorter than the full path; Filename
# Expansion.
#
#
# http://zsh.sourceforge.net/Doc/Release/Prompt-Expansion.html
# Replicate Debian GNU/Linux bash prompt.
PROMPT='%n@%M:%~%(!.#.$) '
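# Example rendering for user "alice" on host "box" in ~/code (non-root):
#   alice@box:~/code$
# with a trailing "#" instead of "$" when running as root.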
|
asherbender/zsh-dot-files
|
prompt.zsh
|
Shell
|
mit
| 947 |
if ! [[ "$LOCALIZR_DISABLE_AUTO_MIGRATION" = "1" ]]; then
python manage.py migrate
fi
cat <<EOF | python manage.py shell
import os
from django.contrib.auth.models import User
if os.environ.get("ADMIN_USERNAME", None):
User.objects.filter(username="$ADMIN_USERNAME").exists() or \
User.objects.create_superuser("$ADMIN_USERNAME", "$ADMIN_EMAIL", "$ADMIN_PASSWORD")
EOF
gunicorn -b 0.0.0.0:$PORT project.wsgi --preload
|
michaelhenry/Localizr
|
entrypoint-heroku.sh
|
Shell
|
mit
| 426 |
#!/bin/sh
cd
#cd Ap tab
cd Application
cd
#cd De tab
cd Desktop
|
ReanyAlex/unix_and_bash
|
linux/nav_ex3.sh
|
Shell
|
mit
| 68 |
#!/bin/bash
source TEMP.conf
for item in TEMP.conf
do
    echo "$item"
done
|
zuimrs/zopenconfig
|
temp-env.sh
|
Shell
|
mit
| 75 |
#!/bin/bash
#Script can be called with an environment parameter
ENV=LOCAL;
if [ "${1}" ]; then
ENV=$1;
fi
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd ../../.. && pwd )";
MODULEDIR="$BASEDIR/ttools/sitesync-core";
#sourcing variables
source $MODULEDIR/lib/vars.sh;
KEEP=$BACKUP_KEEP_DEFAULT;
if [ "$Sitesync_DumpBackupKeep" ]; then
KEEP=$Sitesync_DumpBackupKeep
fi
##TODO show available backups and don't allow running a backup with a name that already exists
##TODO we could actually prepend the name with the date
echo "Enter a name:"
echo "You can leave the name blank, and a default backup will be created."
echo "The last $KEEP default backups are kept while named backups are kept until manually deleted."
read BACKUP_NAME
$MODULEDIR/lib/dump-current-site.sh backup $ENV $BACKUP_NAME;
|
titledk/ttools-sitesync-core
|
local/backup.sh
|
Shell
|
mit
| 816 |
#!/bin/sh
cd `dirname $0`
exec erl -pa $PWD/ebin $PWD/deps/*/ebin -boot start_sasl -s simmoa
|
chrisduesing/simmoa
|
start.sh
|
Shell
|
mit
| 93 |
#!/bin/sh
Red='\33[0;31m';
Gre='\33[0;32m';
RCol='\33[0m';
cd `dirname $0`
python setup.py test &&\
printf "\nPEP 8 compliance check:\n\n"
pep8 \
--repeat \
--show-source \
--exclude=.venv,.tox,dist,docs,build,*.egg,tests,misc . && \
printf "[${Gre}Passed${RCol}] Yeah! everything is clean\n\n" || \
printf "[${Red}Failed${RCol}] Oh No! there is some mess to fix\n\n"
|
kakwa/ldapcherry
|
run_test.sh
|
Shell
|
mit
| 390 |
sudo iptables -F
sudo iptables -N TRAFFIC_ACC_IN
sudo iptables -N TRAFFIC_ACC_OUT
sudo iptables -A INPUT -p tcp --sport 6666 -j ACCEPT
sudo iptables -A INPUT -j TRAFFIC_ACC_IN
sudo iptables -A OUTPUT -p tcp --dport 6666 -j ACCEPT
sudo iptables -A OUTPUT -j TRAFFIC_ACC_OUT
sudo iptables -A TRAFFIC_ACC_IN -p tcp ! --sport 6666
sudo iptables -A TRAFFIC_ACC_IN -p udp
sudo iptables -A TRAFFIC_ACC_IN -p icmp
sudo iptables -A TRAFFIC_ACC_IN -p all
sudo iptables -A TRAFFIC_ACC_OUT -p tcp ! --dport 6666
sudo iptables -A TRAFFIC_ACC_OUT -p udp
sudo iptables -A TRAFFIC_ACC_OUT -p icmp
sudo iptables -A TRAFFIC_ACC_OUT -p all
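# Usage sketch: read the accumulated packet/byte counters later with
#   sudo iptables -L TRAFFIC_ACC_IN -v -x -n
#   sudo iptables -L TRAFFIC_ACC_OUT -v -x -n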
|
TINDreamTeam/PABS
|
iptables_setup.sh
|
Shell
|
mit
| 621 |
FROM debian:buster
|
yglukhov/nim-docker
|
layers/debian-buster.sh
|
Shell
|
mit
| 19 |
gulp build
cp -r dist/carbon /C/Dev/orcharddev/src/Orchard.Web/Modules/CommonLib.Polymer/Components/at-core-theme/dist
cp -r dist/carbon /C/Dev/designer-0.8-preview/components/at-core-theme/dist
|
TangereJs/Semantic-UI
|
build.sh
|
Shell
|
mit
| 194 |
#! /bin/sh
# @copyright Russell Standish 2000-2013
# @author Russell Standish
# This file is part of Classdesc
#
# Open source licensed under the MIT license. See LICENSE for details.
here=`pwd`
if test $? -ne 0; then exit 2; fi
tmp=/tmp/$$
mkdir $tmp
if test $? -ne 0; then exit 2; fi
cd $tmp
if test $? -ne 0; then exit 2; fi
fail()
{
echo "FAILED" 1>&2
cd $here
chmod -R u+w $tmp
rm -rf $tmp
exit 1
}
pass()
{
echo "PASSED" 1>&2
cd $here
chmod -R u+w $tmp
rm -rf $tmp
exit 0
}
trap "fail" 1 2 3 15
export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
# insert ecolab script code here
# use \$ in place of $ to refer to variable contents
# exit 0 to indicate pass, and exit 1 to indicate failure
cat >input.tcl <<EOF
test_boostgraph
EOF
$here/test/test_boostgraph input.tcl
if test $? -ne 0; then fail; fi
pass
|
highperformancecoder/ecolab
|
test/00/t0029a.sh
|
Shell
|
mit
| 867 |
# nvm's bash_completion script contains a handler for zsh too
source ~/.bash/nvm.bash
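# If completion misbehaves under zsh, loading bash compatibility first may help:
#   autoload -U +X bashcompinit && bashcompinit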
|
benknoble/Dotfiles
|
links/zsh/nvm.zsh
|
Shell
|
mit
| 86 |
#!/bin/bash
echo "YeAPF 0.8.59 tools installer";
echo "Copyright (C) 2004-2017 Esteban Daniel Dortta - [email protected]";
cant=1
dist=`uname -s`
dist=${dist:0:6}
if [[ "$dist" == "CYGWIN" ]]; then
cant=0
fi
if [[ $EUID -eq 0 ]]; then
cant=0
fi
if [[ $cant -ne 0 ]]; then
echo "You must be root to run this script. Aborting...";
exit 1;
fi
tgt="/usr/bin"
if [[ "$dist" == "Darwin" ]]; then
tgt="/usr/local/bin"
fi
cp ydistbuilder $tgt/
cp yviewdistinfo $tgt/
cp ydocbuilder $tgt/
cp ydocview $tgt/
cp ycheckdistsource $tgt/
chmod +x $tgt/ydistbuilder
chmod +x $tgt/yviewdistinfo
chmod +x $tgt/ydocbuilder
chmod +x $tgt/ydocview
chmod +x $tgt/ycheckdistsource
|
EDortta/YeAPF
|
0.8.59/tools/distributionBuilder/install-ydistbuilder.sh
|
Shell
|
mit
| 676 |
#!/usr/bin/env bash
function cat {
local homedir=''
local passphrase=''
OPTIND=1
while getopts 'hd:p:' opt; do
case "$opt" in
h) _show_manual_for 'cat';;
p) passphrase=$OPTARG;;
d) homedir=$(_clean_windows_path "$OPTARG");;
*) _invalid_option_for 'cat';;
esac
done
shift $((OPTIND-1))
[ "$1" = '--' ] && shift
_user_required
# Command logic:
for line in "$@"
do
local filename
local path
filename=$(_get_record_filename "$line")
path=$(_prepend_relative_root_path "$filename") # this uses the _relative version because of #710
# The parameters are: filename, write-to-file, force, homedir, passphrase
_decrypt "$path" "0" "0" "$homedir" "$passphrase"
done
}
|
sobolevn/git-secret
|
src/commands/git_secret_cat.sh
|
Shell
|
mit
| 752 |
# init according to man page
if (( $+commands[jenv] ))
then
eval "$(jenv init -)"
fi
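# Quick sanity check after init (usage sketch): `jenv versions` should list the
# managed JDKs once the shims are active.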
|
hershmire/dotfiles
|
java/jenv.zsh
|
Shell
|
mit
| 87 |
#!/bin/sh
if [ -n "$1" ]; then
docker logs -f --tail 200 daylove
else
docker logs --tail 200 daylove
fi
|
netroby/daylove
|
catlog.sh
|
Shell
|
mit
| 105 |
#!/usr/bin/env sh
rm -rf /tmp/kafka-data
mkdir /tmp/kafka-data
mkdir /tmp/kafka-data/data
mkdir /tmp/kafka-data/logs
chmod -R 777 /tmp/kafka-data
BASEDIR=$(git rev-parse --show-toplevel)
if [ -z "$(docker-compose --file ${BASEDIR}/kafka-setup/docker-compose.yml ps -q)" ]; then
${BASEDIR}/kafka-setup/generate-certs.sh
docker-compose --file ${BASEDIR}/kafka-setup/docker-compose.yml rm
fi
docker-compose --file ${BASEDIR}/kafka-setup/docker-compose.yml up -d
|
nodefluent/node-sinek
|
kafka-setup/start.sh
|
Shell
|
mit
| 466 |
#!/bin/bash
#Mount cleartext folder "testing1" as cyphertext "testing2"
encfs --reverse ~/zEncrypted/testing1 ~/zEncrypted/testing2
#Mount amazon cloud drive
acd_cli mount ~/zAmazon
#Upload Encrypted text
acd_cli upload ~/zEncrypted/testing2 /
#Mount cyphertext cloud folder as cleartext folder "testing3"
ENCFS6_CONFIG=~/zEncrypted/testing1/.encfs6.xml encfs ~/zAmazon/testing2 ~/zEncrypted/testing3
read -p "File upload now complete."
read -p "Press enter to unmount everything"
sudo umount ~/zEncrypted/testing3
acd_cli umount ~/zAmazon
sudo umount ~/zEncrypted/testing2
read -p "Folders are now unmounted. Press enter to exit."
|
7blink/scripts
|
encfsAmazon.sh
|
Shell
|
mit
| 644 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2013:0668
#
# Security announcement date: 2013-03-21 18:30:50 UTC
# Script generation date: 2017-01-01 21:14:32 UTC
#
# Operating System: Red Hat 5
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - boost.i386:1.33.1-16.el5_9
# - boost-debuginfo.i386:1.33.1-16.el5_9
# - boost-doc.i386:1.33.1-16.el5_9
# - boost-devel.i386:1.33.1-16.el5_9
#
# Last versions recommended by security team:
# - boost.i386:1.33.1-16.el5_9
# - boost-debuginfo.i386:1.33.1-16.el5_9
# - boost-doc.i386:1.33.1-16.el5_9
# - boost-devel.i386:1.33.1-16.el5_9
#
# CVE List:
# - CVE-2012-2677
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install boost.i386-1.33.1 -y
sudo yum install boost-debuginfo.i386-1.33.1 -y
sudo yum install boost-doc.i386-1.33.1 -y
sudo yum install boost-devel.i386-1.33.1 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/i386/2013/RHSA-2013:0668.sh
|
Shell
|
mit
| 968 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2600-1
#
# Security announcement date: 2013-01-06 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:28 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - cups:1.4.4-7+squeeze2
#
# Last versions recommended by security team:
# - cups:1.4.4-7+squeeze10
#
# CVE List:
# - CVE-2012-5519
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade cups=1.4.4-7+squeeze10 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2013/DSA-2600-1.sh
|
Shell
|
mit
| 614 |
#!/bin/bash -x
# write to file
overwrite_to_file()
{
base16-builder --scheme "db/schemes/base2tone-evening.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-evening-dark.css"
base16-builder --scheme "db/schemes/base2tone-evening.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-evening-light.css"
base16-builder --scheme "db/schemes/base2tone-morning.yml" --template "db/templates/prism/dark-alt.ejs" > "output/prism/prism-base2tone-morning-dark.css"
base16-builder --scheme "db/schemes/base2tone-morning.yml" --template "db/templates/prism/light.ejs" > "output/prism/prism-base2tone-morning-light.css"
base16-builder --scheme "db/schemes/base2tone-space.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-space-dark.css"
base16-builder --scheme "db/schemes/base2tone-space.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-space-light.css"
base16-builder --scheme "db/schemes/base2tone-sea.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-sea-dark.css"
base16-builder --scheme "db/schemes/base2tone-sea.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-sea-light.css"
base16-builder --scheme "db/schemes/base2tone-forest.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-forest-dark.css"
base16-builder --scheme "db/schemes/base2tone-forest.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-forest-light.css"
base16-builder --scheme "db/schemes/base2tone-earth.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-earth-dark.css"
base16-builder --scheme "db/schemes/base2tone-earth.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-earth-light.css"
base16-builder --scheme "db/schemes/base2tone-desert.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-desert-dark.css"
base16-builder --scheme "db/schemes/base2tone-desert.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-desert-light.css"
base16-builder --scheme "db/schemes/base2tone-pool.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-pool-dark.css"
base16-builder --scheme "db/schemes/base2tone-pool.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-pool-light.css"
base16-builder --scheme "db/schemes/base2tone-lake.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-lake-dark.css"
base16-builder --scheme "db/schemes/base2tone-lake.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-lake-light.css"
base16-builder --scheme "db/schemes/base2tone-cave.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-cave-dark.css"
base16-builder --scheme "db/schemes/base2tone-cave.yml" --template "db/templates/prism/light.ejs" > "output/prism/prism-base2tone-cave-light.css"
base16-builder --scheme "db/schemes/base2tone-heath.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-heath-dark.css"
base16-builder --scheme "db/schemes/base2tone-heath.yml" --template "db/templates/prism/light.ejs" > "output/prism/prism-base2tone-heath-light.css"
base16-builder --scheme "db/schemes/base2tone-drawbridge.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-drawbridge-dark.css"
base16-builder --scheme "db/schemes/base2tone-drawbridge.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-drawbridge-light.css"
base16-builder --scheme "db/schemes/base2tone-meadow.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-meadow-dark.css"
base16-builder --scheme "db/schemes/base2tone-meadow.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-meadow-light.css"
base16-builder --scheme "db/schemes/base2tone-lavender.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-lavender-dark.css"
base16-builder --scheme "db/schemes/base2tone-lavender.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-lavender-light.css"
base16-builder --scheme "db/schemes/base2tone-garden.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-garden-dark.css"
base16-builder --scheme "db/schemes/base2tone-garden.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-garden-light.css"
base16-builder --scheme "db/schemes/base2tone-suburb.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-suburb-dark.css"
base16-builder --scheme "db/schemes/base2tone-suburb.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-suburb-light.css"
base16-builder --scheme "db/schemes/base2tone-mall.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-mall-dark.css"
base16-builder --scheme "db/schemes/base2tone-mall.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-mall-light.css"
base16-builder --scheme "db/schemes/base2tone-porch.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-porch-dark.css"
base16-builder --scheme "db/schemes/base2tone-porch.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-porch-light.css"
base16-builder --scheme "db/schemes/base2tone-field.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-field-dark.css"
base16-builder --scheme "db/schemes/base2tone-field.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-field-light.css"
base16-builder --scheme "db/schemes/base2tone-motel.yml" --template "db/templates/prism/dark.ejs" > "output/prism/prism-base2tone-motel-dark.css"
base16-builder --scheme "db/schemes/base2tone-motel.yml" --template "db/templates/prism/light-alt.ejs" > "output/prism/prism-base2tone-motel-light.css"
}
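# A data-driven alternative (untested sketch): the irregular scheme -> template
# mapping could live in one table so each scheme is a single line, e.g.:
#
#   while read -r scheme dark light; do
#     base16-builder --scheme "db/schemes/base2tone-$scheme.yml" \
#       --template "db/templates/prism/$dark.ejs" > "output/prism/prism-base2tone-$scheme-dark.css"
#     base16-builder --scheme "db/schemes/base2tone-$scheme.yml" \
#       --template "db/templates/prism/$light.ejs" > "output/prism/prism-base2tone-$scheme-light.css"
#   done <<'MAP'
#   evening dark light-alt
#   morning dark-alt light
#   cave dark light
#   MAP
#
# (the remaining scheme/template pairs would extend the MAP table the same way)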
# execute it
overwrite_to_file
|
atelierbram/Base2Tone-prism
|
io.sh
|
Shell
|
mit
| 6,165 |
echo "Now running Platon's mind... (Compilation may take a few seconds)"
echo
sbcl --noinform --disable-ldb --lose-on-corruption --no-sysinit --disable-debugger --load ./platonsmind.lisp --end-toplevel-options localhost:55559
|
hoelzl/Academia
|
Scenario/maze/runplatonsmind.sh
|
Shell
|
mit
| 226 |
#!/usr/bin/env bash
# Iterates over all modules bundled in the dist/ and publish them
for dir in ./dist/*/
do
dir=${dir%*/}
npm publish --access=public dist/${dir##*/}/
done
|
akveo/nebular
|
tools/publish.sh
|
Shell
|
mit
| 183 |
#!/bin/sh
echo '
on run argv
if length of argv is equal to 0
set command to ""
else
set command to item 1 of argv
end if
if length of argv is less than 2
runSimple(command)
end if
if length of argv is greater than 1
set profile to item 2 of argv
if length of argv is greater than 2
set winSize to {item 3 of argv, item 4 of argv}
else
#set the window size if a value is not passed in:
set winSize to {640, 640}
end if
if length of argv is greater than 4
set winPos to {item 5 of argv, item 6 of argv}
else
set winPos to {50, 500}
end if
runWithProfile(command, profile, winSize, winPos)
end if
end run
on runSimple(command)
tell application "Terminal"
activate
set newTab to do script(command)
end tell
return newTab
end runSimple
on runWithProfile(command, profile, winSize, winPos)
set newTab to runSimple(command)
tell application "Terminal" to set current settings of newTab to (first settings set whose name is profile)
tell application "System Events"
tell process "Terminal"
activate
set size of window 1 to winSize
set position of window 1 to winPos
end tell
end tell
end runWithProfile
' | osascript - "$@" > /dev/null
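# Usage sketch (hypothetical values): run `ls` in a new Terminal window using
# the "Pro" profile, sized 800x600, positioned at (100, 100):
#   ./term.sh 'ls' 'Pro' 800 600 100 100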
|
mattgiguere/shellScripts
|
term.sh
|
Shell
|
mit
| 1,270 |
#!/bin/sh
bindir=$(pwd)
cd /home/thijs/codes/BSshadow/OpenGL-tutorial_v0014_33/tutorial04_colored_cube/
export LD_LIBRARY_PATH=:$LD_LIBRARY_PATH
if test "x$1" = "x--debugger"; then
shift
if test "x" = "xYES"; then
echo "r " > $bindir/gdbscript
echo "bt" >> $bindir/gdbscript
# the generated launcher omitted the debugger binary here; gdb is assumed
gdb -batch -command=$bindir/gdbscript /home/thijs/codes/BSshadow/OpenGL-tutorial_v0014_33/build/tutorial04_colored_cube
else
"/home/thijs/codes/BSshadow/OpenGL-tutorial_v0014_33/build/tutorial04_colored_cube"
fi
else
"/home/thijs/codes/BSshadow/OpenGL-tutorial_v0014_33/build/tutorial04_colored_cube"
fi
|
thijser/BSshadow
|
OpenGL-tutorial_v0014_33/build/launch-tutorial04_colored_cube.sh
|
Shell
|
mit
| 597 |
export PATH="/home/marcel/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/usr/local/node/bin:$PATH"
export FPATH="/home/marcel/.oh-my-zsh/custom/plugins:$FPATH"
export EDITOR="/usr/bin/atom"
export EDITOR_CLI="/usr/bin/atom"
export CHEATCOLORS=true
export LANG=en_US.UTF-8
|
Marcel-Robitaille/zshdotfiles
|
oh-my-zsh/exports.sh
|
Shell
|
mit
| 321 |
#!/bin/bash
#
# Build munin package from Debian debian-experimental branch.
#
# DEPENDS :docker pull szepeviktor/stretch-backport
test -d /opt/results || mkdir /opt/results
# source hook
cat >/opt/results/debackport-source <<"EOF"
#git clone -b debian-experimental "https://salsa.debian.org/debian/munin.git" munin
git clone -b debian-experimental "https://salsa.debian.org/sumpfralle-guest/munin.git" munin
cd munin/
# Remove "debian/" from version
sed -e 's%git describe --long %&| sed -e "s#^debian/##"%' -i getversion
# Debug
#sed -e 's/make -C doc html man/#&/' -i debian/rules
#sed -e 's/# export DH_VERBOSE=1/export DH_VERBOSE=1/' -i debian/rules
#sed -e '1s|#!/bin/sh|#!/bin/bash -x|' -i getversion
# quilt
PKG_VERSION="$(dpkg-parsechangelog --show-field Version)"
tar -cJf ../munin_${PKG_VERSION%-*}.orig.tar.xz .
CHANGELOG_MSG="From Debian git/debian-experimental"
EOF
# pre-deps hook
cat >/opt/results/debackport-pre-deps <<"EOF"
# debhelper v11
echo "deb http://debian-archive.trafficmanager.net/debian stretch-backports main" \
| sudo -- tee /etc/apt/sources.list.d/backports.list
sudo apt-get update
sudo apt-get install -y debhelper/stretch-backports
# HTTP::Server::Simple::CGI
sudo apt-get install -y libhttp-server-simple-perl
# HTTP::Server::Simple::CGI::PreFork from buster
wget "http://ftp.de.debian.org/debian/pool/main/libh/libhttp-server-simple-cgi-prefork-perl/libhttp-server-simple-cgi-prefork-perl_6-1_all.deb"
sudo dpkg -i libhttp-server-simple-cgi-prefork-perl_*_all.deb
sudo rm libhttp-server-simple-cgi-prefork-perl_*_all.deb
sudo apt-get install -y -f
# Alien::RRDtool and inc::latest
sudo apt-get install -y pkg-config graphviz libxml2-dev libpango1.0-dev libcairo2-dev \
libfile-sharedir-perl libtest-requires-perl libmodule-build-perl
sudo PERL_MM_USE_DEFAULT=1 cpan -i inc::latest
sudo PERL_MM_USE_DEFAULT=1 cpan -i Alien::RRDtool
EOF
# Build
docker run --rm --tty -v /opt/results:/opt/results --env PACKAGE="munin" szepeviktor/stretch-backport
rm -f /opt/results/{debackport-source,debackport-pre-deps}
|
szepeviktor/debian-server-tools
|
package/docker-backport-munin.sh
|
Shell
|
mit
| 2,060 |
#!/usr/bin/env bash
CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=scripts/helpers.sh
source "$CURRENT_DIR/helpers.sh"
# script global variables
ram_low_icon=""
ram_medium_icon=""
ram_high_icon=""
ram_low_default_icon="="
ram_medium_default_icon="≡"
ram_high_default_icon="≣"
# icons are set as script global variables
get_icon_settings() {
ram_low_icon=$(get_tmux_option "@ram_low_icon" "$ram_low_default_icon")
ram_medium_icon=$(get_tmux_option "@ram_medium_icon" "$ram_medium_default_icon")
ram_high_icon=$(get_tmux_option "@ram_high_icon" "$ram_high_default_icon")
}
print_icon() {
local ram_percentage
local ram_load_status
ram_percentage=$("$CURRENT_DIR"/ram_percentage.sh | sed -e 's/%//')
ram_load_status=$(load_status "$ram_percentage" "ram")
if [ "$ram_load_status" == "low" ]; then
echo "$ram_low_icon"
elif [ "$ram_load_status" == "medium" ]; then
echo "$ram_medium_icon"
elif [ "$ram_load_status" == "high" ]; then
echo "$ram_high_icon"
fi
}
main() {
get_icon_settings
print_icon "$1"
}
main "$@"
|
tmux-plugins/tmux-cpu
|
scripts/ram_icon.sh
|
Shell
|
mit
| 1,092 |
#!/bin/sh
#
# Script to install a system onto the BBB eMMC.
# This script handles the root file system partition.
#
# Run it like this:
#
# ./emmc_copy_rootfs.sh <image-file> [ <hostname> ]
#
MACHINE=beaglebone
DEV=/dev/mmcblk1p2
if [ -z "${SRCDIR}" ]; then
SRCDIR=.
else
if [ ! -d "${SRCDIR}" ]; then
echo "Source directory not found: ${SRCDIR}"
exit 1
fi
fi
if [ "x${1}" = "x" ]; then
echo -e "\nUsage: ${0} <image-name> [<hostname>]\n"
exit 0
fi
if [ ! -d /media ]; then
echo "Mount point /media does not exist"
exit 1
fi
if [ ! -b $DEV ]; then
echo "Block device not found: $DEV"
exit 1
fi
if [ -f "${1}" ]; then
FULLPATH="${1}"
elif [ -f "${SRCDIR}/${1}" ]; then
FULLPATH="${SRCDIR}/${1}"
elif [ -f "${SRCDIR}/${1}-${MACHINE}.tar.xz" ]; then
FULLPATH="${SRCDIR}/${1}-${MACHINE}.tar.xz"
elif [ -f "${SRCDIR}/${1}-image-${MACHINE}.tar.xz" ]; then
FULLPATH="${SRCDIR}/${1}-image-${MACHINE}.tar.xz"
else
echo "Rootfs image file not found."
echo "Tried the following:"
echo "${1}"
echo "${SRCDIR}/${1}"
echo "${SRCDIR}/${1}-${MACHINE}.tar.xz"
echo "${SRCDIR}/${1}-image-${MACHINE}.tar.xz"
exit 1
fi
if [ "x${2}" = "x" ]; then
TARGET_HOSTNAME=$MACHINE
else
TARGET_HOSTNAME=${2}
fi
echo -e "HOSTNAME: $TARGET_HOSTNAME\n"
echo "Formatting $DEV as ext4"
mkfs.ext4 -q -L ROOT $DEV
echo "Mounting $DEV as /media"
mount $DEV /media
echo "Extracting ${FULLPATH} to /media"
tar -C /media -xJf ${FULLPATH}
echo "Writing hostname to /etc/hostname"
export TARGET_HOSTNAME
echo ${TARGET_HOSTNAME} > /media/etc/hostname
if [ -f ${SRCDIR}/interfaces ]; then
echo "Writing interfaces to /media/etc/network/"
cp ${SRCDIR}/interfaces /media/etc/network/interfaces
fi
if [ -f ${SRCDIR}/wpa_supplicant.conf ]; then
echo "Writing wpa_supplicant.conf to /media/etc/"
cp ${SRCDIR}/wpa_supplicant.conf /media/etc/wpa_supplicant.conf
fi
echo "Unmounting $DEV"
umount $DEV
echo "Done"
|
Netgate/meta-ubmc
|
recipes-support/emmc-installer/files/emmc_copy_rootfs.sh
|
Shell
|
mit
| 1,980 |
#!/bin/bash
#
# Vim
#
# This installs plugins and other goodies for Vim
# Install Vim solarized colour scheme
[[ -d ~/.vim/colors ]] || mkdir -p ~/.vim/colors
if ! [ -e ~/.vim/colors/solarized.vim ]
then
curl -L -o ~/.vim/colors/solarized.vim https://raw.githubusercontent.com/altercation/solarized/master/vim-colors-solarized/colors/solarized.vim
fi
# Install Vundle plugin manager
[[ -d ~/.vim/bundle ]] || mkdir -p ~/.vim/bundle
if ! [ -e ~/.vim/bundle/Vundle.vim ]
then
git clone https://github.com/gmarik/Vundle.vim.git ~/.vim/bundle/Vundle.vim
fi
# Install powerline fonts
if ! [ -e ~/Library/Fonts/inconsolata-powerline.otf ]
then
curl -L -o ~/Library/Fonts/inconsolata-powerline.otf https://raw.githubusercontent.com/Lokaltog/powerline-fonts/master/Inconsolata/Inconsolata%20for%20Powerline.otf
fi
# Compile supporting code for You Complete Me plugin
if ! [ -e ~/.vim/bundle/YouCompleteMe/third_party/ycmd/ycm_client_support.so ]
then
cd ~/.vim/bundle/YouCompleteMe
~/.vim/bundle/YouCompleteMe/install.sh --clang-completer
cd ~/.dotfiles
fi
|
loganfuller/dotfiles
|
vim/install.sh
|
Shell
|
mit
| 1,078 |
#!/bin/bash
# take the raw output of mgblast which is 12 fields
# and get the query, subject, %identity, bitscore
cut -f 1,5,9,10 mgblast_out.txt > cut_fields.txt
# print each line if the %identity is above 90.00
awk '$3>90.00' cut_fields.txt > parsed_cut.txt
# now prepare for fgclust by only taking the query, subject, and score
cut -f 1,2,4 parsed_cut.txt > fgclust_in.txt
## try as a single pipeline (equivalent to the three steps above):
## cut -f 1,5,9,10 mgblast_out.txt | awk '$3>90.00' | cut -f 1,2,4 > fgclust_in.txt
|
sestaton/sesbio
|
genome_assembly/shell_scripts/parse_mgblash_id.sh
|
Shell
|
mit
| 415 |
#!/usr/bin/env bash
function fixture() {
if [ -z "${1:-}" ]; then
printf "Usage: fixture name\n" >&2
return 1
fi
local NEW=""
if [ ! -d "${HOME}/fixtures/${1}" ]; then
NEW=" (new)"
mkdir -p "${HOME}/fixtures/${1}"
fi
clear
printf "${GREEN}==>${RESET} fixture ${1}${NEW}\n"
cd "${HOME}/fixtures/${1}"
title "fixture ${1}"
}
# Completion
function _fixture_completion() {
if [ "${#COMP_WORDS[@]}" != "2" ]; then
return
fi
local DIRS=($(compgen -d -S " " "${HOME}/fixtures/${COMP_WORDS[1]}"))
for I in ${!DIRS[@]}; do
COMPREPLY[$I]="${DIRS[$I]##${HOME}/fixtures/}"
done
}
complete -F _fixture_completion fixture
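# Usage sketch: `fixture myproj<Tab>` completes against ~/fixtures/ subdirectories,
# and `fixture myproj` creates ~/fixtures/myproj on first use.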
|
smashwilson/dotfiles
|
bash/fixture.bash
|
Shell
|
mit
| 666 |
#!/usr/bin/env bash
set -e
TESTCATEGORY=$1
PLATFORM=$2
if [ "$TESTCATEGORY" == "" ]; then
echo You must specify a category!
exit 1
fi
if [ "$PLATFORM" == "" ]; then
PLATFORM=`uname`
fi
BUILDDIR=./build/$TESTCATEGORY
OUTPUTDIR=./build/benchmarks
mkdir -p $OUTPUTDIR
function benchmark {
local NAMES=$1
local SERIES=$2
local LOG=$OUTPUTDIR/$TESTCATEGORY.txt
echo "# " > $LOG
for i in $NAMES;
do
local NAME=${TESTCATEGORY}_$i
local TEST=$BUILDDIR/$NAME
for NUM in $SERIES;
do
echo $TEST $NUM
$TEST $NUM >> $LOG
done
done
}
function benchmark_array {
local SERIES=()
count=100000
while [ $count -le 1000000 ]
do
SERIES+="$count "
((count+=100000))
done
benchmark "stl eastl boost jc carray" "$SERIES"
}
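# Note: benchmark_hashtable and benchmark_algorithm are dispatched below but are
# not defined in this script; they are assumed to mirror benchmark_array's pattern.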
case $TESTCATEGORY in
"hashtable") benchmark_hashtable ;;
"array") benchmark_array ;;
"algorithm") benchmark_algorithm ;;
esac
|
JCash/containers
|
benchmarks/benchmark.sh
|
Shell
|
mit
| 1,008 |
#!/bin/bash
PROJECT_ID=125331
OUTPUT_DIR=src/code/utils/lang
LANGUAGES=("he" "tr" "zh-TW" "zh-HANS" "es" "pl" "et" "el" "nb" "nn" "de" "th" "pt-BR")
# argument processing from https://stackoverflow.com/a/14203146
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
-a|--api_token)
API_TOKEN="$2"
shift # past argument
;;
-o|--output_dir)
OUTPUT_DIR="$2"
shift # past argument
;;
esac
shift # past argument or value
done
for LANGUAGE in "${LANGUAGES[@]}"
do
echo "Requesting strings for '$LANGUAGE'..."
PULLARGS="-p $PROJECT_ID -l $LANGUAGE -o $OUTPUT_DIR -a $API_TOKEN"
# echo "PULLARGS=$PULLARGS"
./bin/strings-pull.sh $PULLARGS
echo ""
done
# update English strings as well (e.g. stripping comments)
./node_modules/.bin/strip-json-comments src/code/utils/lang/en-US-master.json \
> src/code/utils/lang/en-US.json
|
concord-consortium/building-models
|
bin/strings-pull-project.sh
|
Shell
|
mit
| 903 |
#!/bin/bash
THIS_ADDR="dtn://192.168.11.122.gao.com"
dir=`pwd`
num=1
fileflag="bundle payload is file@#@&%~#%"
bundlelist=`ls /var/dtn/bundleDeliveried`
#echo "bundlelist: $bundlelist"
while [ 1 ]
do
newfile=`ls -t /var/dtn/bundleDeliveried | head -5`
newlist=""
for str in $newfile
do
newlist="$str $newlist"
done
for str in $newlist
do
if [[ $bundlelist =~ $str ]];then
continue
else
fileroute="/var/dtn/bundleDeliveried/"
fileroute="$fileroute$str"
bundlelist="$bundlelist $str"
cmd=`cat $fileroute`
#neighbour bundle
if [[ "$cmd" == "22 serialization"* ]]; then
continue
#jpg file
elif [[ "$cmd" == $fileflag* ]]; then
#recover file
sed -i "s/$fileflag//g" $fileroute
echo "receive a file $str"
cp $fileroute /home/grui/2017newwork/dtn2_20170223/ion/testfile$num
num=`expr $num + 1`
#json flie
else
tmp=`echo ${cmd} | jq -c '.retrieval'`
if [ "$tmp" != "null" ]; then
##retrieval
#If this node is the cmd sender, then
# send this cmd to the destination
#else if this node is the cmd executor, then
# execute the cmd and send replying
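#Example retrieval message (hypothetical values) matching the jq paths below:
#{"retrieval":{"saddr":"dtn://a.gao.com","daddr":"dtn://b.gao.com","file":"/tmp/pic.jpg","sarea":"1","darea":"2"}}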
echo ${cmd} | jq '.'
SADDR=`echo ${cmd} | jq -c '.retrieval.saddr'`
SADDR=`echo ${SADDR} | sed 's/\"//g'`
DADDR=`echo ${cmd} | jq -c '.retrieval.daddr'`
DADDR=`echo ${DADDR} | sed 's/\"//g'`
REQUEST_FILE=`echo ${cmd} | jq -c '.retrieval.file'`
REQUEST_FILE=`echo ${REQUEST_FILE} | sed 's/\"//g'`
DAREA=`echo ${cmd} | jq -c '.retrieval.darea'`
DAREA=`echo ${DAREA} | sed 's/\"//g'`
SAREA=`echo ${cmd} | jq -c '.retrieval.sarea'`
SAREA=`echo ${SAREA} | sed 's/\"//g'`
if [ "$SADDR" = "$THIS_ADDR" ]; then
echo "dtnsend -s ${THIS_ADDR} -d ${DADDR} -t m -p ${cmd} -g ${DAREA}"
/home/grui/2017newwork/dtn2_20170223/DTN/DTN2/apps/dtnsend/dtnsend -s ${THIS_ADDR} -d ${DADDR} -t m -p ${cmd} -g ${DAREA}
elif [ "$DADDR" = "$THIS_ADDR" ]; then
#Checking the requested file
FILE_VALID=1
if [ -f $REQUEST_FILE ]; then
echo "dtnsend -s ${THIS_ADDR} -d ${SADDR} -t f -p ${REQUEST_FILE} -g ${SAREA}"
#handle REQUEST_FILE
filecontent=`cat $REQUEST_FILE`
filecontent="$fileflag$filecontent"
echo "$filecontent" > $REQUEST_FILE
/home/grui/2017newwork/dtn2_20170223/DTN/DTN2/apps/dtnsend/dtnsend -s ${THIS_ADDR} -d ${SADDR} -t f -p ${REQUEST_FILE} -g ${SAREA}
FILE_VALID=1
#recover REQUEST_FILE
sed -i "s/$fileflag//g" $REQUEST_FILE
else
echo "File not exist!"
FILE_VALID=-1
fi
#Sending replying
echo "Replying!"
echo "dtnsend -s ${THIS_ADDR} -d ${SADDR} -t m -p {\"replying\":{\"retrieval\":\"${FILE_VALID}\"\,\"port\":\"${RECVPORT}\"}} -g ${SAREA}"
/home/grui/2017newwork/dtn2_20170223/DTN/DTN2/apps/dtnsend/dtnsend -s ${THIS_ADDR} -d ${SADDR} -t m -p {\"replying\":{\"retrieval\":\"${FILE_VALID}\"\,\"port\":\"${RECVPORT}\"}} -g ${SAREA}
fi
fi
##CMD--Replying
tmp=`echo ${cmd} | jq -c '.replying'`
if [ "$tmp" != "null" ]; then
echo ${cmd} | jq '.'
RECVPORT=`echo ${cmd} | jq -c '.replying.port'`
RECVPORT=`echo ${RECVPORT} | sed 's/\"//g'`
# bprecvfile ${THIS_ADDR}.${RECVPORT} 1
fi
fi
fi
done
done
|
xyongcn/dtn2
|
ion/json_cmd_listener.sh
|
Shell
|
mit
| 3,320 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2015:0988
#
# Security announcement date: 2015-05-12 19:26:11 UTC
# Script generation date: 2017-01-25 21:22:45 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - firefox.i686:38.0-4.el6_6
# - firefox-debuginfo.i686:38.0-4.el6_6
# - firefox.x86_64:38.0-4.el6_6
# - firefox-debuginfo.x86_64:38.0-4.el6_6
#
# Last versions recommended by security team:
# - firefox.i686:45.7.0-1.el6_8
# - firefox-debuginfo.i686:45.7.0-1.el6_8
# - firefox.x86_64:45.7.0-1.el6_8
# - firefox-debuginfo.x86_64:45.7.0-1.el6_8
#
# CVE List:
# - CVE-2015-0797
# - CVE-2015-2708
# - CVE-2015-2710
# - CVE-2015-2713
# - CVE-2015-2716
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install firefox.i686-45.7.0 -y
sudo yum install firefox-debuginfo.i686-45.7.0 -y
sudo yum install firefox.x86_64-45.7.0 -y
sudo yum install firefox-debuginfo.x86_64-45.7.0 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2015/RHSA-2015:0988.sh
|
Shell
|
mit
| 1,070 |
#!/bin/sh
set -eo pipefail -o nounset
## Get the .genome file
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
## Get and process the tRNA Genes from from UCSC
wget --quiet -O - http://hgdownload.soe.ucsc.edu/goldenPath/hg19/database/tRNAs.txt.gz \
| gzip -dc \
| awk -v OFS="\t" -v FS="\t" 'BEGIN {print "#Table Info: https://genome.ucsc.edu/cgi-bin/hgTables?db=hg19&hgta_group=genes&hgta_track=tRNAs&hgta_table=tRNAs&hgta_doSchema=describe+table+schema\n#Genomic tRNA Database (GtRNAdb) Summary URL: http://gtrnadb2.ucsc.edu/genomes/eukaryota/Hsapi19/\n#chrom\tstart\tend\tstrand\ttRNA_gene_name\tamino_acid\tanti_codon\tintron_coordinates\ttRNA_ScanSE_Score\tGenomic_tRNA_Database_alignment_URL"}
{gsub("</BLOCKQUOTE>","", $10); gsub("<BLOCKQUOTE>","",$10); gsub(" ","-",$10); print $2,$3,$4,$7,$5,$8,$9,$10,$11,$13}' \
| gsort /dev/stdin $genome \
| bgzip -c > hg19-trna-genes-ucsc-v1.bed.gz
tabix hg19-trna-genes-ucsc-v1.bed.gz
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/hg19/hg19-trna-genes-ucsc-v1/recipe.sh
|
Shell
|
mit
| 1,040 |
#!/bin/bash
# Set variables
# -----------------------------------
DREAMBOT_GITHUB_FOLDER_NAME="dreambot1.0.0"
DREAMBOT_GITHUB_FILE_NAME="dreambot-1.0.0"
# Set functions
# -----------------------------------
logMessage () {
echo " $1"
echo " ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
}
echo ""
echo " ============================================================"
echo " Instalador TraderBot - DreamersTraders.com"
echo ""
echo " Esto tomará algunos minutos"
echo ""
echo " ============================================================"
echo ""
logMessage "(1/6) Actualizando la Base del sistema"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
apt-get -qq update > /dev/null 2>&1
logMessage "(2/6) Instalando nodejs 6.x"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
curl -qsL https://deb.nodesource.com/setup_6.x | bash - > /dev/null 2>&1
apt-get -y -qq install nodejs > /dev/null 2>&1
logMessage "(3/6) Instalando Herramientas"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
apt-get -y -qq install unzip > /dev/null 2>&1
npm install -g pm2 [email protected] dreambot-monitor > /dev/null 2>&1
logMessage "(4/6) Instalando TraderBot"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
wget -q https://github.com/DreamersTraders/dreambot/releases/download/dreambot1.0.0/dreambot-1.0.0.zip -P /opt/
unzip -o -qq /opt/${DREAMBOT_GITHUB_FILE_NAME}.zip -d /opt/unzip-tmp
# create folder for the current version.
sudo mkdir /opt/${DREAMBOT_GITHUB_FILE_NAME} -p
# Copy only the executables.
cp /opt/unzip-tmp/dreambot-* /opt/${DREAMBOT_GITHUB_FILE_NAME}
# creates a symbolic link to the dreambot folder (remove any stale link first).
rm /opt/dreambot > /dev/null 2>&1
ln -s /opt/${DREAMBOT_GITHUB_FILE_NAME} /opt/dreambot
# Cleanup
sudo rm /opt/${DREAMBOT_GITHUB_FILE_NAME}.zip
sudo rm -R /opt/unzip-tmp
# Set rights
sudo chmod +x /opt/dreambot/dreambot-*
logMessage "(5/6) Agregando los comandos"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
echo "" >> ~/.bashrc
echo "# DREAMBOT ALIASES" >> ~/.bashrc
echo "alias gcd='cd /opt/DREAMBOT'" >> ~/.bashrc
echo "alias ginit='gcd && yo dreambot init'" >> ~/.bashrc
echo "alias gadd='gcd && yo dreambot add'" >> ~/.bashrc
echo "alias gl='pm2 l'" >> ~/.bashrc
echo "alias glog='pm2 logs'" >> ~/.bashrc
echo "alias gstart='pm2 start'" >> ~/.bashrc
echo "alias gstop='pm2 stop'" >> ~/.bashrc
logMessage "(6/6) Generador de archivos"
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create folder for yeoman.
sudo chmod g+rwx /root
sudo chmod g+rwx /opt/dreambot
# Yeoman write rights.
sudo mkdir /root/.config/configstore -p
cat > /root/.config/configstore/insight-yo.json << EOM
{
"clientId": 1337,
"optOut": true
}
EOM
sudo chmod g+rwx /root/.config
sudo chmod g+rwx /root/.config/configstore
sudo chmod g+rw /root/.config/configstore/*
# pm2 write rights.
sudo mkdir /root/.pm2 -p
echo "1337" > /root/.pm2/touch
sudo chmod g+rwx /root/.pm2
sudo chmod g+rw /root/.pm2/*
echo ""
echo " ============================================================"
echo " Configuración completa!"
echo ""
echo " Por favor corra los siguientes comandos "
echo " para iniciar el TraderBot:"
echo " gcd"
echo " ginit"
echo ""
echo " ============================================================"
echo ""
|
DreamersTraders/generator-dreambot
|
install-loud.sh
|
Shell
|
mit
| 3,316 |
#!/bin/bash
# ----------------------
# KUDU Deployment Script
# Version: 0.1.7
# ----------------------
# Helpers
# -------
exitWithMessageOnError () {
if [ ! $? -eq 0 ]; then
echo "An error has occurred during web site deployment."
echo $1
exit 1
fi
}
# Prerequisites
# -------------
# Verify node.js installed
hash node 2>/dev/null
exitWithMessageOnError "Missing node.js executable, please install node.js, if already installed make sure it can be reached from current environment."
# Setup
# -----
SCRIPT_DIR="${BASH_SOURCE[0]%\\*}"
SCRIPT_DIR="${SCRIPT_DIR%/*}"
ARTIFACTS=$SCRIPT_DIR/../artifacts
KUDU_SYNC_CMD=${KUDU_SYNC_CMD//\"}
if [[ ! -n "$DEPLOYMENT_SOURCE" ]]; then
DEPLOYMENT_SOURCE=$SCRIPT_DIR
fi
if [[ ! -n "$NEXT_MANIFEST_PATH" ]]; then
NEXT_MANIFEST_PATH=$ARTIFACTS/manifest
if [[ ! -n "$PREVIOUS_MANIFEST_PATH" ]]; then
PREVIOUS_MANIFEST_PATH=$NEXT_MANIFEST_PATH
fi
fi
if [[ ! -n "$DEPLOYMENT_TARGET" ]]; then
DEPLOYMENT_TARGET=$ARTIFACTS/wwwroot
else
KUDU_SERVICE=true
fi
if [[ ! -n "$KUDU_SYNC_CMD" ]]; then
# Install kudu sync
echo Installing Kudu Sync
npm install kudusync -g --silent
exitWithMessageOnError "npm failed"
if [[ ! -n "$KUDU_SERVICE" ]]; then
# In case we are running locally this is the correct location of kuduSync
KUDU_SYNC_CMD=kuduSync
else
# In case we are running on kudu service this is the correct location of kuduSync
KUDU_SYNC_CMD=$APPDATA/npm/node_modules/kuduSync/bin/kuduSync
fi
fi
# Node Helpers
# ------------
selectNodeVersion () {
if [[ -n "$KUDU_SELECT_NODE_VERSION_CMD" ]]; then
SELECT_NODE_VERSION="$KUDU_SELECT_NODE_VERSION_CMD \"$DEPLOYMENT_SOURCE\" \"$DEPLOYMENT_TARGET\" \"$DEPLOYMENT_TEMP\""
eval $SELECT_NODE_VERSION
exitWithMessageOnError "select node version failed"
if [[ -e "$DEPLOYMENT_TEMP/__nodeVersion.tmp" ]]; then
NODE_EXE=`cat "$DEPLOYMENT_TEMP/__nodeVersion.tmp"`
exitWithMessageOnError "getting node version failed"
fi
if [[ -e "$DEPLOYMENT_TEMP/.tmp" ]]; then
NPM_JS_PATH=`cat "$DEPLOYMENT_TEMP/__npmVersion.tmp"`
exitWithMessageOnError "getting npm version failed"
fi
if [[ ! -n "$NODE_EXE" ]]; then
NODE_EXE=node
fi
NPM_CMD="\"$NODE_EXE\" \"$NPM_JS_PATH\""
else
NPM_CMD=npm
NODE_EXE=node
fi
}
##################################################################################################################################
# Deployment
# ----------
echo Handling node.js deployment.
# 1. KuduSync
if [[ "$IN_PLACE_DEPLOYMENT" -ne "1" ]]; then
"$KUDU_SYNC_CMD" -v 50 -f "$DEPLOYMENT_SOURCE" -t "$DEPLOYMENT_TARGET" -n "$NEXT_MANIFEST_PATH" -p "$PREVIOUS_MANIFEST_PATH" -i ".git;.hg;.deployment;deploy.sh"
exitWithMessageOnError "Kudu Sync failed"
fi
# 2. Select node version
selectNodeVersion
# # 3. Install npm packages
# if [ -e "$DEPLOYMENT_TARGET/package.json" ]; then
# cd "$DEPLOYMENT_TARGET"
# eval $NPM_CMD install --production
# exitWithMessageOnError "npm failed"
# cd - > /dev/null
# fi
# # 4. Install bower packages
# if [ -e "$DEPLOYMENT_TARGET/bower.json" ]; then
# cd "$DEPLOYMENT_TARGET"
# eval $NPM_CMD install bower
# exitWithMessageOnError "installing bower failed"
# ./node_modules/.bin/bower install
# exitWithMessageOnError "bower failed"
# cd - > /dev/null
# fi
# 5. Run grunt
# if [ -e "$DEPLOYMENT_TARGET/Gruntfile.js" ]; then
# cd "$DEPLOYMENT_TARGET"
# eval $NPM_CMD install grunt-cli
# exitWithMessageOnError "installing grunt failed"
# ./node_modules/.bin/grunt --no-color build
# exitWithMessageOnError "grunt failed"
# cd - > /dev/null
# fi
##################################################################################################################################
# Post deployment stub
if [[ -n "$POST_DEPLOYMENT_ACTION" ]]; then
POST_DEPLOYMENT_ACTION=${POST_DEPLOYMENT_ACTION//\"}
cd "${POST_DEPLOYMENT_ACTION_DIR%\\*}"
"$POST_DEPLOYMENT_ACTION"
exitWithMessageOnError "post deployment action failed"
fi
echo "Finished successfully."
|
Famospace/Famo.us-Monospace
|
deploy.sh
|
Shell
|
mit
| 4,072 |
#!/bin/sh
cd -- "$(dirname -- "${BASH_SOURCE:-$0}")"
zip -j runestar.zip runestar.cmd runestar.command ../LICENSE ../NOTICE
|
RuneSuite/client
|
bootstrap/zip.sh
|
Shell
|
mit
| 126 |
#!/bin/bash
# modified from http://ficate.com/blog/2012/10/15/battery-life-in-the-land-of-tmux/
HEART='♥ '
battery_info=`ioreg -rc AppleSmartBattery`
current_charge=$(echo $battery_info | grep -o '"CurrentCapacity" = [0-9]\+' | awk '{print $3}')
total_charge=$(echo $battery_info | grep -o '"MaxCapacity" = [0-9]\+' | awk '{print $3}')
charged_slots=$(echo "(($current_charge/$total_charge)*5)+1" | bc -l | cut -d '.' -f 1)
if [[ $charged_slots -gt 5 ]]; then
charged_slots=5
fi
echo -n '#[fg=colour196]'
for i in `seq 1 $charged_slots`; do echo -n "$HEART"; done
if [[ $charged_slots -lt 5 ]]; then
echo -n '#[fg=colour254]'
for i in `seq 1 $(echo "5-$charged_slots" | bc)`; do echo -n "$HEART"; done
fi
echo -n '#[fg=colour241]| '
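# print the overall charge percentage via ioreg ("?" if MaxCapacity is unreadable)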
echo $(ioreg -l | awk '$3~/Capacity/{c[$3]=$5}END{OFMT="%.2f%%";max=c["\"MaxCapacity\""];print(max>0?100*c["\"CurrentCapacity\""]/max:"?")}')
|
linkux-it/linkux-dev
|
conf/osx/sh/battery_indicator.sh
|
Shell
|
mit
| 889 |
#!/bin/bash
mkdir -p /dev/shm/images
raspistill -o /dev/shm/images/test.jpg
result=`python detectFruit.py http://YOUR-EXTERNAL-IP:5005/images/test.jpg`
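# detectFruit.py is assumed to print 1 when fruit is detected (hence the check below)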
if [ "$result" == "1" ] ; then
curl -X PATCH -d \
'{"checkout_state":{"workflow_state":"shopping"},"items":{"1069829":{"created_at":1.409336316211E9,"qty":1,"user_id":YOUR_USER_ID},"8182033":{"created_at":1.431448385824E9,"qty":2,"user_id":YOUR_USER_ID},"8583398":{"created_at":1.431448413452E9,"qty":3,"user_id":YOUR_USER_ID},"8585519":{"created_at":1.431448355207E9,"qty":3,"user_id":YOUR_USER_ID},"8601780":{"created_at":1.424915467829E9,"qty":3,"user_id":YOUR_USER_ID},"8602830":{"created_at":1.43144840911E9,"qty":1,"user_id":YOUR_USER_ID}},"users":{"-JXAzAp6rgtM4u2dV2tI":{"id":YOUR_USER_ID,"name":"StevenH"},"-Jj2_kFsu5hvZRhx4KX1":{"id":YOUR_USER_ID,"name":"StevenH"}}}' https://instacart.firebaseio.com/carts/YOUR_HASH.json
fi
|
StevenHickson/AutonomousFridge
|
test.sh
|
Shell
|
mit
| 892 |
#!/bin/sh
# Copyright (c) 2014-2016 The PlanBcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
DIR=$(dirname "$0")
[ "/${DIR#/}" != "$DIR" ] && DIR=$(dirname "$(pwd)/$0")
echo "Using verify-commits data from ${DIR}"
VERIFIED_ROOT=$(cat "${DIR}/trusted-git-root")
VERIFIED_SHA512_ROOT=$(cat "${DIR}/trusted-sha512-root-commit")
REVSIG_ALLOWED=$(cat "${DIR}/allow-revsig-commits")
HAVE_FAILED=false
HAVE_GNU_SHA512=1
[ ! -x "$(which sha512sum)" ] && HAVE_GNU_SHA512=0
if [ x"$1" = "x" ]; then
CURRENT_COMMIT="HEAD"
else
CURRENT_COMMIT="$1"
fi
if [ "${CURRENT_COMMIT#* }" != "$CURRENT_COMMIT" ]; then
echo "Commit must not contain spaces?" > /dev/stderr
exit 1
fi
VERIFY_TREE=0
if [ x"$2" = "x--tree-checks" ]; then
VERIFY_TREE=1
fi
NO_SHA1=1
PREV_COMMIT=""
while true; do
if [ "$CURRENT_COMMIT" = $VERIFIED_ROOT ]; then
echo "There is a valid path from "$CURRENT_COMMIT" to $VERIFIED_ROOT where all commits are signed!"
exit 0;
fi
if [ "$CURRENT_COMMIT" = $VERIFIED_SHA512_ROOT ]; then
if [ "$VERIFY_TREE" = "1" ]; then
echo "All Tree-SHA512s matched up to $VERIFIED_SHA512_ROOT" > /dev/stderr
fi
VERIFY_TREE=0
NO_SHA1=0
fi
if [ "$NO_SHA1" = "1" ]; then
export BITCOIN_VERIFY_COMMITS_ALLOW_SHA1=0
else
export BITCOIN_VERIFY_COMMITS_ALLOW_SHA1=1
fi
if [ "${REVSIG_ALLOWED#*$CURRENT_COMMIT}" != "$REVSIG_ALLOWED" ]; then
export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=1
else
export BITCOIN_VERIFY_COMMITS_ALLOW_REVSIG=0
fi
if ! git -c "gpg.program=${DIR}/gpg.sh" verify-commit "$CURRENT_COMMIT" > /dev/null; then
if [ "$PREV_COMMIT" != "" ]; then
echo "No parent of $PREV_COMMIT was signed with a trusted key!" > /dev/stderr
echo "Parents are:" > /dev/stderr
PARENTS=$(git show -s --format=format:%P $PREV_COMMIT)
for PARENT in $PARENTS; do
git show -s $PARENT > /dev/stderr
done
else
echo "$CURRENT_COMMIT was not signed with a trusted key!" > /dev/stderr
fi
exit 1
fi
# We always verify the top of the tree
if [ "$VERIFY_TREE" = 1 -o "$PREV_COMMIT" = "" ]; then
IFS_CACHE="$IFS"
IFS='
'
for LINE in $(git ls-tree --full-tree -r "$CURRENT_COMMIT"); do
case "$LINE" in
"12"*)
echo "Repo contains symlinks" > /dev/stderr
IFS="$IFS_CACHE"
exit 1
;;
esac
done
IFS="$IFS_CACHE"
FILE_HASHES=""
for FILE in $(git ls-tree --full-tree -r --name-only "$CURRENT_COMMIT" | LC_ALL=C sort); do
if [ "$HAVE_GNU_SHA512" = 1 ]; then
HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | sha512sum | { read FIRST OTHER; echo $FIRST; } )
else
HASH=$(git cat-file blob "$CURRENT_COMMIT":"$FILE" | shasum -a 512 | { read FIRST OTHER; echo $FIRST; } )
fi
[ "$FILE_HASHES" != "" ] && FILE_HASHES="$FILE_HASHES"'
'
FILE_HASHES="$FILE_HASHES$HASH $FILE"
done
if [ "$HAVE_GNU_SHA512" = 1 ]; then
TREE_HASH="$(echo "$FILE_HASHES" | sha512sum)"
else
TREE_HASH="$(echo "$FILE_HASHES" | shasum -a 512)"
fi
HASH_MATCHES=0
MSG="$(git show -s --format=format:%B "$CURRENT_COMMIT" | tail -n1)"
case "$MSG -" in
"Tree-SHA512: $TREE_HASH")
HASH_MATCHES=1;;
esac
if [ "$HASH_MATCHES" = "0" ]; then
echo "Tree-SHA512 did not match for commit $CURRENT_COMMIT" > /dev/stderr
exit 1
fi
fi
PARENTS=$(git show -s --format=format:%P "$CURRENT_COMMIT")
for PARENT in $PARENTS; do
PREV_COMMIT="$CURRENT_COMMIT"
CURRENT_COMMIT="$PARENT"
break
done
done
|
planbcoin/planbcoin
|
contrib/verify-commits/verify-commits.sh
|
Shell
|
mit
| 3,511 |
#!/usr/bin/env bash
#
# Model: FRITZ!Box 7490 (7590?)
# Fritz!OS: 7.27
# author: Michael Dinkelaker,
# michael[dot]dinkelaker[at]gmail[dot]com
#
# version history:
# 0.1, 2016-06-03, first release
# 0.2, 2017-03-03, fix for Fritz!OS 6.80
# 0.3, 2020-10-05, fix for Fritz!OS 7.21
# 0.4, 2020-06-20, fix for Fritz!OS 7.27
# added export_phoneassets. Login needs a Username now.
#
# example usage:
# source lib_fritz7490.sh
# _FBOX="http://192.168.178.1"
# _PASSWORD="secret_fbox_login"
# _EXPORT_PASSWORD="same_or_another_secret"
# login
# export_settings myExportPassword > /some/location/$(date +%Y-%m-%d)_fritz_settings.export
# export_phoneassets myExportPassword > /some/location/$(date +%Y-%m-%d)_fritz_phone.assets.zip
# export_phonebook 0 Telefonbuch > /some/location/$(date +%Y-%m-%d)_telefonbuch.xml
# export_phonebook 1 Work > /some/location/$(date +%Y-%m-%d)_work.xml
#
function login() {
# check if environment variables are setup correctly
if [[ -z ${_FBOX} ]] || [[ -z ${_PASSWORD} ]] || [[ -z ${_USERNAME} ]]; then
echo "Error: make sure VARS _FBOX, _PASSWORD and _USERNAME are set!!!"
exit 1
fi
get_challenge
get_md5
# assemble challenge key and md5
_RESPONSE="${_CHALLENGE}"-"${_MD5}"
get_sid
}
# get configuration from FritzBox and write to STDOUT
# argument 1: export password
function export_settings() {
local _EXPORT_PASSWORD=$1
if [[ -z ${_EXPORT_PASSWORD} ]]; then
echo "Error: EXPORT_PASSWORD is empty!!!"
exit 1
fi
curl -s \
-k \
-F 'sid='${_SID} \
-F 'ImportExportPassword='${_EXPORT_PASSWORD} \
-F 'ConfigExport=' \
${_FBOX}/cgi-bin/firmwarecfg
}
# get phone assets from FritzBox and write to STDOUT
# argument 1: export password
function export_phoneassets() {
local _EXPORT_PASSWORD=$1
if [[ -z ${_EXPORT_PASSWORD} ]]; then
echo "Error: EXPORT_PASSWORD is empty!!!"
exit 1
fi
curl -s \
-k \
-F 'sid='${_SID} \
-F 'AssetsImportExportPassword='${_EXPORT_PASSWORD} \
-F 'AssetsExport=' \
${_FBOX}/cgi-bin/firmwarecfg
}
# get phonebook from FritzBox and write to STDOUT
# argument 1: PhoneBookId
# argument 2: PhoneBookExportName
function export_phonebook() {
local _PhoneBookId=$1
local _PhoneBookExportName=$2
local isnum='^[0-9]+$'
if [[ -z ${_PhoneBookExportName} ]] || ! [[ ${_PhoneBookId} =~ ${isnum} ]]; then
echo "Error: PhoneBookExportName is empty or PhoneBookId isn't a number!!!"
exit 1
fi
curl -s \
-k \
-F 'sid='${_SID} \
-F 'PhonebookId='${_PhoneBookId} \
-F 'PhonebookExportName='${_PhoneBookExportName} \
-F 'PhonebookExport=' \
${_FBOX}/cgi-bin/firmwarecfg
}
########################################################################################################################
# authentication helpers
# get challenge key
function get_challenge() {
_CHALLENGE=$(curl -s \
-k \
${_FBOX}/login_sid.lua | \
grep -Po '<Challenge>.*?</Challenge>' | \
sed 's/\(<Challenge>\|<\/Challenge>\)//g')
if [[ -z ${_CHALLENGE} ]]; then
echo "ERROR: received empty challenge"
exit 1
fi
}
# build md5 from challenge key and password
function get_md5() {
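    # AVM's login scheme hashes the UTF-16LE encoding of "<challenge>-<password>"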
_MD5=$(echo -n \
${_CHALLENGE}"-"${_PASSWORD} | \
iconv -f ISO8859-1 \
-t UTF-16LE | \
md5sum -b | \
awk '{print substr($0,1,32)}')
}
function get_sid() {
_SID=$(curl -i \
-s \
-k \
-d 'response='${_RESPONSE} \
-d 'username='${_USERNAME} \
-d 'page=' \
${_FBOX}/login_sid.lua | \
grep -Po '<SID>.*?</SID>' | \
sed 's/\(<SID>\|<\/SID>\)//g')
if [[ "${_SID}" == "0000000000000000"} ]]; then
echo "ERROR: got invalid sid!"
exit 1
fi
}
########################################################################################################################
|
m1d1/fritzbox7490_backup
|
lib_fritz7490.sh
|
Shell
|
mit
| 4,235 |
#
# http://nginx.org/en/linux_packages.html?_ga=1.165512447.1124031743.1468906423
#
# https://www.digitalocean.com/community/tutorials/how-to-secure-nginx-with-let-s-encrypt-on-ubuntu-14-04
# add source
curl http://nginx.org/keys/nginx_signing.key | apt-key add -
cat <<'EOF' > /etc/apt/sources.list.d/nginx.list
# trusty for ubuntu 14.04, xenial for 16.04
deb http://nginx.org/packages/ubuntu/ trusty nginx
deb-src http://nginx.org/packages/ubuntu/ trusty nginx
EOF
# install nginx 1.10 (the stable version)
apt-get update && apt-get install -y nginx
cat <<'EOF'> /etc/nginx/conf.d/default.conf
server {
# http2 server
listen 443 ssl http2 default_server;
listen [::]:443 ssl http2 default_server;
server_name _;
ssl_certificate /etc/nginx/ssl/http2.horde.live.crt.pem;
ssl_certificate_key /etc/nginx/ssl/http2.horde.live.key.pem;
ssl_ciphers EECDH+CHACHA20:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
ssl_dhparam /etc/nginx/ssl/dhparam.pem;
ssl_session_cache shared:SSL:5m;
ssl_session_timeout 1h;
charset utf-8;
access_log /var/log/nginx/ssl.access.log main;
add_header Strict-Transport-Security "max-age=15768000; includeSubDomains" always;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
server {
listen 80 default_server;
listen [::]:80 default_server;
server_name _;
access_log /var/log/nginx/http.access.log main;
location / {
# uncomment return 301 after letencrypto setup ok
# return 301 https://$server_name$request_uri;
root /usr/share/nginx/html;
# index index.html index.htm;
}
location ~ /.well-known {
root /usr/share/nginx/html;
allow all;
}
}
EOF
mkdir -p /etc/nginx/ssl
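# dhparam.pem is referenced by the server block above but not generated here;
# create it once with: openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048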
mkdir -p /usr/share/nginx/html/.well-known
apt-get -y install git bc
mkdir -p /opt
git clone https://github.com/letsencrypt/letsencrypt /opt/letsencrypt
# config A record of horde.live http2.horde.live www.horde.live to ip of this nginx server
cd /opt/letsencrypt && ./letsencrypt-auto certonly -a webroot --webroot-path=/usr/share/nginx/html -d horde.live -d http2.horde.live -d www.horde.live
ls -alh /etc/letsencrypt/live/horde.live/
cat /etc/letsencrypt/live/horde.live/fullchain.pem >/etc/nginx/ssl/http2.horde.live.crt.pem
cat /etc/letsencrypt/live/horde.live/privkey.pem >/etc/nginx/ssl/http2.horde.live.key.pem
chmod 0644 /etc/nginx/ssl/http2.horde.live.crt.pem
chmod 0600 /etc/nginx/ssl/http2.horde.live.key.pem
nginx -t && service nginx reload
# enable 301 in http server after https is ok
|
wheelcomplex/freebsd-desktop
|
nginx-http2.setup.sh
|
Shell
|
cc0-1.0
| 3,474 |
closure-library/closure/bin/build/closurebuilder.py \
--namespace "myproject.start" \
--root . \
--output_mode=compiled \
--compiler_jar=compiler.jar \
--compiler_flags="--compilation_level=ADVANCED_OPTIMIZATIONS" \
> hello-compiled.js
|
trajano/hello-closure
|
build.sh
|
Shell
|
epl-1.0
| 248 |
#!/bin/bash
# Written by Oded Gabbay
# [email protected]
# openSUSE support and detection of built-in modules by Joerg Roedel <[email protected]>
# Add /sbin and /usr/sbin to path in case it is not already and lspci is there
PATH=$PATH:/sbin:/usr/sbin
usage()
{
echo -e "\nusage: $1 [options]\n"
echo -e "options:\n"
echo -e " -h, --help Prints this help"
}
__scriptname="kfd_check_installation.sh"
__pass_flag="YES"
kv_supported_types="1304 1305 1306 1307 1309 130a 130b 130c 130d 130e 130f 1310 1311 1312 1313 1315 1316 1317 1318 131b 131c 131d"
cz_supported_types="9874"
kfd_supported_perms="666 777"
# parameter while-loop
while [[ "$1" != "" ]];
do
case $1 in
-h | --help )
usage $__scriptname
exit 0
;;
*)
echo "The parameter $1 is not allowed"
usage $__scriptname
exit 1 # error
;;
esac
shift
done
kv_exists=$(lspci -nn | grep -c Kaveri)
if [[ $kv_exists != "0" ]]; then
kv_exists_result="Yes"
kv_type=$(lspci -nn | grep VGA | awk -F':' '{print $4}' | awk -F']' '{print $1}')
if [[ ! -n "$kv_type" ]]; then
kv_type_result="NO"
__pass_flag="NO"
elif [[ $kv_supported_types =~ $kv_type ]]; then
kv_type_result="Yes"
else
kv_type_result="NO"
__pass_flag="NO"
fi
else
kv_exists_result="NO"
kv_type_result="N/A"
cz_exists_result="NO"
for i in $cz_supported_types; do
cz_exists=$(lspci -nn | grep -c "1002:$i")
if [[ $cz_exists != "0" ]]; then
cz_exists_result="Yes"
cz_type_result="Yes"
fi
done
if [[ $cz_exists_result == "NO" ]]; then
cz_type_result="N/A"
__pass_flag="NO"
fi
fi
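# Check /proc/kallsyms for driver entry points; a count of 1 means the module is loaded (or built in).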
radeon_exists=$(grep -c -w radeon_pci_probe /proc/kallsyms)
amdgpu_exists=$(grep -c -w amdgpu_pci_probe /proc/kallsyms)
kfd_exists=$(grep -c -w kgd2kfd_init /proc/kallsyms)
iommu_exists=$(grep -c -w amd_iommu_bind_pasid /proc/kallsyms)
if [[ $kv_exists_result == "Yes" ]]; then
if [[ $radeon_exists == "1" ]]; then
radeon_exists_result="Yes"
radeon_blacklisted="0"
else
radeon_exists_result="NO"
radeon_blacklisted=$(grep blacklist /etc/modprobe.d/* | grep -c -w radeon)
__pass_flag="NO"
fi
fi
if [[ $cz_exists_result == "Yes" ]]; then
if [[ $amdgpu_exists == "1" ]]; then
amdgpu_exists_result="Yes"
amdgpu_blacklisted="0"
else
amdgpu_exists_result="NO"
amdgpu_blacklisted=$(grep blacklist /etc/modprobe.d/* | grep -c -w amdgpu)
__pass_flag="NO"
fi
fi
if [[ $kfd_exists == "1" ]]; then
kfd_exists_result="Yes"
else
kfd_exists_result="NO"
__pass_flag="NO"
fi
if [[ $iommu_exists == "1" ]]; then
iommu_exists_result="Yes"
else
iommu_exists_result="NO"
__pass_flag="NO"
fi
if [[ -e /dev/kfd ]]; then
kfd_dev_exists_result="Yes"
kfd_perms=$(stat -c %a /dev/kfd)
if [[ $kfd_supported_perms == *$kfd_perms* ]]; then
kfd_perms_result="Yes"
else
kfd_perms_result="NO"
__pass_flag="NO"
fi
else
kfd_dev_exists_result="NO"
kfd_perms_result="NO"
__pass_flag="NO"
fi
if [[ -e /sys/devices/virtual/kfd/kfd/topology/nodes/0/gpu_id ]]; then
gpu_id_result="Yes"
gpu_id=$(cat /sys/devices/virtual/kfd/kfd/topology/nodes/0/gpu_id)
if [[ $gpu_id == "0" ]]; then
gpu_id_result="NO"
__pass_flag="NO"
fi
else
gpu_id_result="NO"
__pass_flag="NO"
fi
# Print results
echo -e ""
if [[ $kv_exists_result == "Yes" ]]; then
echo -e "Kaveri detected:............................$kv_exists_result"
echo -e "Kaveri type supported:......................$kv_type_result"
echo -e "radeon module is loaded:....................$radeon_exists_result"
if [[ ! $radeon_blacklisted == "0" ]]; then
echo -e "Radeon module is blacklisted!!!"
fi
else
echo -e "Carrizo detected:...........................$cz_exists_result"
if [[ $cz_exists_result == "Yes" ]]; then
echo -e "Carrizo type supported:.....................$cz_type_result"
echo -e "amdgpu module is loaded:....................$amdgpu_exists_result"
if [[ ! $amdgpu_blacklisted == "0" ]]; then
echo -e "amdgpu module is blacklisted!!!"
fi
else
echo -e "Kaveri detected:............................$kv_exists_result"
fi
fi
echo -e "amdkfd module is loaded:....................$kfd_exists_result"
echo -e "AMD IOMMU V2 module is loaded:..............$iommu_exists_result"
echo -e "KFD device exists:..........................$kfd_dev_exists_result"
echo -e "KFD device has correct permissions:.........$kfd_perms_result"
echo -e "Valid GPU ID is detected:...................$gpu_id_result"
echo -e ""
echo -e "Can run HSA.................................$__pass_flag"
|
HSAFoundation/HSA-Drivers-Linux-AMD
|
kfd_check_installation.sh
|
Shell
|
gpl-2.0
| 4,442 |
#!/bin/sh
bin/s2fil_filter_construct \
-out data_filter/mf_Tbfly_Bcmb_Stmpl.fil \
-nside 128 \
-filename_dil data_in/dilation_radian.txt \
-bkgnd_cmb data_in/wmap_lcdm_pl_model_yr1_v1.txt \
-noise_var 0.05e0 \
-beam_fwhm 13.0e0 \
-tmpl butterfly \
-filter_heu .true. \
-filter_type mf \
-scale_type tmpl
|
astro-informatics/s2fil
|
filter_construct_mf_bfly.sh
|
Shell
|
gpl-2.0
| 347 |
#!/bin/bash
STAT_FREQ=1
BASE_DIR="$(cd $(dirname $0); pwd)"
COOKIE_FILE="$BASE_DIR/cookies.txt"
TEMP_FILE="/tmp/routerinfo_temp"
GENERAL="--no-check-certificate --keep-session-cookies"
SILENCE="--quiet"
VERBOSE="--verbose --debug"
SAVE_COOKIES="--save-cookies $COOKIE_FILE"
LOAD_COOKIES="--load-cookies $COOKIE_FILE"
ROUTER_IP=$(ip r list | grep default | cut -d " " -f 3)
URL=http://$ROUTER_IP/html/status/overview.asp
UPRATE_MAX=0
DOWNRATE_MAX=0
LAST_REC=0
format_number() {
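# usage: format_number <value> <speed|size> [K|M|G] [bc scale]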
BASE=
SUFFIX=
SCALE=1
case $2 in
speed) SUFFIX="b/s";;
size) SUFFIX="B";;
*) SUFFIX="(???)";;
esac
if [ -n "$3" ]; then
BASE=$3
else
if [ $(( $1/(1024*1024*1024) )) != 0 ]; then
BASE=G
elif [ $(( $1/(1024*1024) )) != 0 ]; then
BASE=M
elif [ $(( $1/1024 )) != 0 ]; then
BASE=K
fi
fi
[ -n "$4" ] && SCALE=$4
case $BASE in
G) echo -n "$(bc <<< "scale=$SCALE;$1/(1024*1024*1024)")G${SUFFIX}";;
M) echo -n "$(bc <<< "scale=$SCALE;$1/(1024*1024)")M${SUFFIX}";;
K) echo -n "$(bc <<< "scale=$SCALE;$1/(1024)")K${SUFFIX}";;
*) echo -n "$1${SUFFIX}";;
esac
}
while true; do
wget $GENERAL $SILENCE $LOAD_COOKIES $URL -O $TEMP_FILE
STATISTICS=$(cat $TEMP_FILE | grep WanStatistics | tr -d "'")
IPDNS=$(cat $TEMP_FILE | grep wanIPDNS | tr -d "'")
#echo "========"
#echo $STATISTICS
#echo "========"
#echo $IPDNS
#echo "========"
IP_V4=$(echo $IPDNS | awk '{ print $4 }')
UPRATE=$(echo $STATISTICS | awk '{ print $5 }')
DOWNRATE=$(echo $STATISTICS | awk '{ print $9 }')
UPVOLUME=$(echo $STATISTICS | awk '{ print $13 }')
DOWNVOLUME=$(echo $STATISTICS | awk '{ print $17 }')
if [ -z "$DOWNVOLUME" ] || [[ "$DOWNVOLUME" =~ '^[0-9]+$' ]]; then
echo "(no info)"
else
SUMVOLUME=$(( $UPVOLUME + $DOWNVOLUME ))
if [ "$1" == "table" ]; then
printf "%15s ▲ %10s ▼ %10s %7s\n" $IP_V4 \
$(format_number $UPRATE speed K) $(format_number $DOWNRATE speed K) \
$(format_number $SUMVOLUME size G)
else
printf "%s ▲ %s ▼ %s (%s)\n" $IP_V4 \
$(format_number $UPRATE speed) $(format_number $DOWNRATE speed) \
$(format_number $SUMVOLUME size)
fi
[ $UPRATE -gt $UPRATE_MAX ] && UPRATE_MAX=$UPRATE
[ $DOWNRATE -gt $DOWNRATE_MAX ] && DOWNRATE_MAX=$DOWNRATE
NOW=$(date +%s)
if [ $(( $NOW - $LAST_REC )) -gt $STAT_FREQ ]; then
printf "%s %s,%s,%s,%s,%s,%s\n" \
$(date +"%F %T") $IP_V4 \
$UPVOLUME $DOWNVOLUME $UPRATE_MAX $DOWNRATE_MAX >> /var/local/routerinfo.csv
UPRATE_MAX=0
DOWNRATE_MAX=0
LAST_REC=$NOW
fi
fi
[ "$1" = "once" ] && exit
sleep 2
done
|
pfsmorigo/routerinfo
|
routerinfo.sh
|
Shell
|
gpl-2.0
| 2,557 |
#!/usr/bin/env bash
############################################################################
# Portion of Slurm test suite
############################################################################
# Copyright (C) 2015 SchedMD LLC
# Written by Nathan Yee, SchedMD
#
# This file is part of SLURM, a resource management program.
# For details, see <http://slurm.schedmd.com/>.
# Please also read the included file: DISCLAIMER.
#
# SLURM is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along
# with SLURM; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
############################################################################
if [ $# -ne 5 ]; then
echo "test8.21.bash <srun_path> <squeue_path> <job_id> <job_size> <mode:1|2?"
exit 1
fi
srun=$1
squeue=$2
job_id=$3
job_size=$4
test_mode=$5
delay_time=1
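# Poll with an increasing back-off until a one-node job could start immediately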
while [ $delay_time -le 60 ]
do
$srun -N1 --test-only --immediate /bin/true
rc=$?
if [ $rc -eq 0 ]
then
break
fi
sleep $delay_time
delay_time=`expr $delay_time + 1`
done
if [ $test_mode -gt 1 ]
then
job_size=`expr $job_size + $job_size`
sleep_time=0
else
sleep_time=1
fi
while [ $job_size -ge 2 ]
do
job_size=`expr $job_size / 2`
$srun -N$job_size --test-only sleep 50 &
sleep $sleep_time
done
$srun -N1 --test-only sleep 50 &
sleep 5
$squeue --jobs=$job_id --steps --noheader --format='Step_ID=%i MidplaneList=%N'
|
jabl/slurm
|
testsuite/expect/test8.21.bash
|
Shell
|
gpl-2.0
| 1,908 |
#!/system/xbin/busybox sh
# Original by dorimanx for ExTweaks
# Modified by UpInTheAir for SkyHigh kernels & Synapse
BB=/system/xbin/busybox;
P=/data/media/0/hackerkernel/values/cron_google;
GOOGLE=`cat $P`;
if [ "$($BB mount | grep rootfs | cut -c 26-27 | grep -c ro)" -eq "1" ]; then
$BB mount -o remount,rw /;
fi;
if [ "$GOOGLE" == 1 ]; then
if [ "$($BB pidof com.google.android.gms | wc -l)" -eq "1" ]; then
$BB kill $($BB pidof com.google.android.gms);
fi;
if [ "$($BB pidof com.google.android.gms.unstable | wc -l)" -eq "1" ]; then
$BB kill $($BB pidof com.google.android.gms.unstable);
fi;
if [ "$($BB pidof com.google.android.gms.persistent | wc -l)" -eq "1" ]; then
$BB kill $($BB pidof com.google.android.gms.persistent);
fi;
if [ "$($BB pidof com.google.android.gms.wearable | wc -l)" -eq "1" ]; then
$BB kill $($BB pidof com.google.android.gms.wearable);
fi;
date +%R-%F > /data/crontab/cron-ram-release;
echo " Google RAM released" >> /data/crontab/cron-ram-release;
elif [ "$GOOGLE" == 0 ]; then
date +%R-%F > /data/crontab/cron-ram-release;
echo " Google RAM Release is disabled" >> /data/crontab/cron-ram-release;
fi;
$BB mount -t rootfs -o remount,ro rootfs;
|
HRTKernel/Hacker_Kernel_SM-G92X_MM
|
build/res/crontab/cron-scripts/ram_release.sh
|
Shell
|
gpl-2.0
| 1,206 |
# $Progeny$
#
# Copyright 2005 Progeny Linux Systems, Inc.
#
# This file is part of PDK.
#
# PDK is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# PDK is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PDK; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# The ethereal/tethereal packages have had trouble matching sources
# to binaries.
#
# The xsok packages are also hard. Why?
. atest/test_lib.sh
file_in_pool() {
file="$1"
name=$(basename $file)
[ '1' = $(find repo/pool -name $name | wc -l) ] \
|| fail "Can't find $name."
}
plumb_files() {
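# Rebuild the channel from the given files, resolve/download/repogen them,
# and verify that exactly these files end up in the repo pool.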
files="$@"
rm -rf channel
mkdir channel
cp $files channel/
pushd rchp
pdk channel update
cat >a.xml <<EOF
<?xml version="1.0"?>
<component>
<contents>
<dsc/>
<deb/>
<deb/>
<deb/>
<deb/>
<deb/>
<deb/>
<deb/>
<deb/>
</contents>
</component>
EOF
pdk resolve a.xml
pdk download a.xml
rm -rf repo
pdk repogen a.xml
[ $# = $(find repo/pool -type f | wc -l) ] \
|| fail 'found wrong number of files in pool'
for file in $files; do
file_in_pool $file
done
popd
}
pdk workspace create rchp
dir_channel=$(pwd)/channel
pushd rchp
cat >etc/channels.xml <<EOF
<?xml version="1.0"?>
<channels>
<a>
<type>dir</type>
<path>${dir_channel}</path>
</a>
</channels>
EOF
popd
plumb_files \
packages/gcc_4.0.2-2_i386.deb \
packages/gcc-defaults_1.30.dsc \
packages/gcc-defaults_1.30.tar.gz
file1=packages/ethereal_0.9.13-1.0progeny1.dsc
file2=packages/ethereal_0.9.13-1.0progeny1.diff.gz
file3=packages/ethereal_0.9.13.orig.tar.gz
plumb_files $file1 $file2 $file3 \
packages/ethereal-common_0.9.13-1.0progeny1_ia64.deb \
packages/ethereal-dev_0.9.13-1.0progeny1_ia64.deb \
packages/ethereal_0.9.13-1.0progeny1_ia64.deb \
packages/tethereal_0.9.13-1.0progeny1_ia64.deb
file1=packages/ethereal_0.9.13-1.0progeny2.dsc
file2=packages/ethereal_0.9.13-1.0progeny2.diff.gz
plumb_files $file1 $file2 $file3 \
packages/ethereal-common_0.9.13-1.0progeny2_ia64.deb \
packages/ethereal-dev_0.9.13-1.0progeny2_ia64.deb \
packages/ethereal_0.9.13-1.0progeny2_ia64.deb \
packages/tethereal_0.9.13-1.0progeny2_ia64.deb
file1=packages/ethereal_0.9.4-1woody2.dsc
file2=packages/ethereal_0.9.4-1woody2.diff.gz
file3=packages/ethereal_0.9.4.orig.tar.gz
plumb_files $file1 $file2 $file3 \
packages/ethereal-common_0.9.4-1woody2_i386.deb \
packages/ethereal-common_0.9.4-1woody2_ia64.deb \
packages/ethereal-dev_0.9.4-1woody2_i386.deb \
packages/ethereal-dev_0.9.4-1woody2_ia64.deb \
packages/ethereal_0.9.4-1woody2_i386.deb \
packages/ethereal_0.9.4-1woody2_ia64.deb \
packages/tethereal_0.9.4-1woody2_i386.deb \
packages/tethereal_0.9.4-1woody2_ia64.deb
file1=packages/ethereal_0.9.4-1woody3.dsc
file2=packages/ethereal_0.9.4-1woody3.diff.gz
plumb_files $file1 $file2 $file3 \
packages/ethereal-common_0.9.4-1woody3_i386.deb \
packages/ethereal-common_0.9.4-1woody3_ia64.deb \
packages/ethereal-dev_0.9.4-1woody3_i386.deb \
packages/ethereal-dev_0.9.4-1woody3_ia64.deb \
packages/ethereal_0.9.4-1woody3_i386.deb \
packages/ethereal_0.9.4-1woody3_ia64.deb \
packages/tethereal_0.9.4-1woody3_i386.deb \
packages/tethereal_0.9.4-1woody3_ia64.deb
file1=packages/ethereal_0.9.4-1woody4.dsc
file2=packages/ethereal_0.9.4-1woody4.diff.gz
plumb_files $file1 $file2 $file3 \
packages/ethereal-common_0.9.4-1woody4_i386.deb \
packages/ethereal-common_0.9.4-1woody4_ia64.deb \
packages/ethereal-dev_0.9.4-1woody4_i386.deb \
packages/ethereal-dev_0.9.4-1woody4_ia64.deb \
packages/ethereal_0.9.4-1woody4_i386.deb \
packages/ethereal_0.9.4-1woody4_ia64.deb \
packages/tethereal_0.9.4-1woody4_i386.deb \
packages/tethereal_0.9.4-1woody4_ia64.deb
file1=packages/ethereal_0.9.4-1woody5.dsc
file2=packages/ethereal_0.9.4-1woody5.diff.gz
plumb_files $file1 $file2 $file3 \
packages/ethereal-common_0.9.4-1woody5_i386.deb \
packages/ethereal-common_0.9.4-1woody5_ia64.deb \
packages/ethereal-dev_0.9.4-1woody5_i386.deb \
packages/ethereal-dev_0.9.4-1woody5_ia64.deb \
packages/ethereal_0.9.4-1woody5_i386.deb \
packages/ethereal_0.9.4-1woody5_ia64.deb \
packages/tethereal_0.9.4-1woody5_i386.deb \
packages/tethereal_0.9.4-1woody5_ia64.deb
file1=packages/ethereal_0.9.4-1woody6.dsc
file2=packages/ethereal_0.9.4-1woody6.diff.gz
plumb_files $file1 $file2 $file3 \
packages/ethereal-common_0.9.4-1woody6_i386.deb \
packages/ethereal-common_0.9.4-1woody6_ia64.deb \
packages/ethereal-dev_0.9.4-1woody6_i386.deb \
packages/ethereal-dev_0.9.4-1woody6_ia64.deb \
packages/ethereal_0.9.4-1woody6_i386.deb \
packages/ethereal_0.9.4-1woody6_ia64.deb \
packages/tethereal_0.9.4-1woody6_i386.deb \
packages/tethereal_0.9.4-1woody6_ia64.deb
file1=packages/xsok_1.02-9.dsc
file2=packages/xsok_1.02-9.diff.gz
file3=packages/xsok_1.02.orig.tar.gz
plumb_files $file1 $file2 $file3 \
packages/xsok_1.02-9_i386.deb \
packages/xsok_1.02-9_ia64.deb
file1=packages/xsok_1.02-9woody2.dsc
file2=packages/xsok_1.02-9woody2.diff.gz
plumb_files $file1 $file2 $file3 \
packages/xsok_1.02-9woody2_i386.deb \
packages/xsok_1.02-9woody2_ia64.deb
# vim:set ai et sw=4 ts=4 tw=75:
|
64studio/pdk
|
atest/resolve-compile-hard-packages.sh
|
Shell
|
gpl-2.0
| 5,855 |
# this file contains just simple commands or more complex one liners
# delete files that are older than 10 days and ignore all files that contain "README"
find . -mtime +10 | xargs ls | grep -v README | xargs rm -f
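# safer GNU-find variant of the above: handles odd filenames and skips names containing "README"
find . -type f -mtime +10 ! -name '*README*' -delete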
# delete all files older than 10 days
find . -mtime +10 -exec rm {} \;
# get public ip
curl -s checkip.dyndns.org|sed -e 's/.*Current IP Address: //' -e 's/<.*$//'
curl ipecho.net/plain
curl ifconfig.me
wget -qO- icanhazip.com
|
thauzer/TestProjects
|
bash/one_liners.sh
|
Shell
|
gpl-2.0
| 445 |
#!/bin/sh
# Copyright (c) 2017 Oracle and/or its affiliates. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it would be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alexey Kodanev <[email protected]>
TCID=dccp01
TST_TOTAL=3
TST_CLEANUP="cleanup"
TST_NEEDS_TMPDIR=1
. test_net.sh
cleanup()
{
tst_rmdir
}
setup()
{
tst_require_root
}
test_run()
{
tst_resm TINFO "compare UDP/DCCP performance"
tst_netload -H $(tst_ipaddr rhost) -T udp
local res0="$(cat tst_netload.res)"
tst_netload -H $(tst_ipaddr rhost) -T dccp
local res1="$(cat tst_netload.res)"
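	# relative difference between the two runs, in percent (tst_netload writes its result to tst_netload.res)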
local per=$(( $res0 * 100 / $res1 - 100 ))
if [ "$per" -gt "100" -o "$per" -lt "-100" ]; then
tst_resm TFAIL "dccp performance $per %"
else
tst_resm TPASS "dccp performance $per % in range -100 ... 100 %"
fi
}
setup
test_run
tst_exit
|
richiejp/ltp
|
testcases/network/dccp/dccp01.sh
|
Shell
|
gpl-2.0
| 1,381 |