code (string, 2–1.05M chars) | repo_name (string, 5–110 chars) | path (string, 3–922 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 2–1.05M)
---|---|---|---|---|---
#!/usr/bin/env bash
#
# List of contrib packages to be released
# See build-support/README.md for more information on the format of each
# `PKG_$NAME` definition.
#
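# In brief (inferred from the entries below): each PKG_$NAME is a
# three-element array of the PyPI distribution name, the pants BUILD target
# that packages it, and the name of a bash function that smoke-tests the
# installed plugin.
#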
PKG_SCROOGE=(
"pantsbuild.pants.contrib.scrooge"
"//contrib/scrooge/src/python/pants/contrib/scrooge:plugin"
"pkg_scrooge_install_test"
)
function pkg_scrooge_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.scrooge==${version}']" \
--explain gen | grep "scrooge" &> /dev/null && \
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.scrooge==${version}']" \
--explain lint | grep "thrift" &> /dev/null
}
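# The install tests in this file share a pattern: install the freshly released
# plugin via --plugins, then assert that `--explain <goal>` lists a task the
# plugin registers. A hypothetical new plugin would be smoke-tested like so
# (illustrative names only):
#
#   execute_packaged_pants_with_internal_backends \
#     --plugins="['pantsbuild.pants.contrib.example==${version}']" \
#     --explain gen | grep "example" &> /dev/null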
PKG_BUILDGEN=(
"pantsbuild.pants.contrib.buildgen"
"//contrib/buildgen/src/python/pants/contrib/buildgen:plugin"
"pkg_buildgen_install_test"
)
function pkg_buildgen_install_test() {
local version=$1
shift
local PIP_ARGS="$@"
pip install ${PIP_ARGS} "pantsbuild.pants.contrib.buildgen==${version}" && \
python -c "from pants.contrib.buildgen.build_file_manipulator import *"
}
PKG_GO=(
"pantsbuild.pants.contrib.go"
"//contrib/go/src/python/pants/contrib/go:plugin"
"pkg_go_install_test"
)
function pkg_go_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.go==${version}']" \
--explain test | grep "GoTest_test_go" &> /dev/null
}
PKG_NODE=(
"pantsbuild.pants.contrib.node"
"//contrib/node/src/python/pants/contrib/node:plugin"
"pkg_node_install_test"
)
function pkg_node_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.node==${version}']" \
--explain test | grep "NodeTest_test_node" &> /dev/null
}
PKG_SCALAJS=(
"pantsbuild.pants.contrib.scalajs"
"//contrib/scalajs/src/python/pants/contrib/scalajs:plugin"
"pkg_scalajs_install_test"
)
function pkg_scalajs_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.scalajs==${version}']" \
--explain compile | grep "scala-js-link" &> /dev/null
}
PKG_PYTHON_CHECKS=(
"pantsbuild.pants.contrib.python.checks"
"//contrib/python/src/python/pants/contrib/python/checks:plugin"
"pkg_python_checks_install_test"
)
function pkg_python_checks_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.python.checks==${version}']" \
--explain lint | grep "python-eval" &> /dev/null && \
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.python.checks==${version}']" \
--explain lint | grep "pythonstyle" &> /dev/null
}
PKG_FINDBUGS=(
"pantsbuild.pants.contrib.findbugs"
"//contrib/findbugs/src/python/pants/contrib/findbugs:plugin"
"pkg_findbugs_install_test"
)
function pkg_findbugs_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.findbugs==${version}']" \
--explain compile | grep "findbugs" &> /dev/null
}
PKG_CPP=(
"pantsbuild.pants.contrib.cpp"
"//contrib/cpp/src/python/pants/contrib/cpp:plugin"
"pkg_cpp_install_test"
)
function pkg_cpp_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.cpp==${version}']" \
--explain compile | grep "cpp" &> /dev/null
}
PKG_CONFLUENCE=(
"pantsbuild.pants.contrib.confluence"
"//contrib/confluence/src/python/pants/contrib/confluence:plugin"
"pkg_confluence_install_test"
)
function pkg_confluence_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.confluence==${version}']" \
--explain confluence | grep "ConfluencePublish_confluence" &> /dev/null
}
PKG_ERRORPRONE=(
"pantsbuild.pants.contrib.errorprone"
"//contrib/errorprone/src/python/pants/contrib/errorprone:plugin"
"pkg_errorprone_install_test"
)
function pkg_errorprone_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.errorprone==${version}']" \
--explain compile | grep "errorprone" &> /dev/null
}
PKG_CODEANALYSIS=(
"pantsbuild.pants.contrib.codeanalysis"
"//contrib/codeanalysis/src/python/pants/contrib/codeanalysis:plugin"
"pkg_codeanalysis_install_test"
)
function pkg_codeanalysis_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.codeanalysis==${version}']" \
--explain index | grep "kythe" &> /dev/null
}
PKG_JAXWS=(
"pantsbuild.pants.contrib.jax_ws"
"//contrib/jax_ws/src/python/pants/contrib/jax_ws:plugin"
"pkg_jax_ws_install_test"
)
function pkg_jax_ws_install_test() {
local version=$1
# Ensure our goal and target are installed and exposed.
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.jax_ws==${version}']" \
--explain gen | grep "jax-ws" &> /dev/null
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.jax_ws==${version}']" \
targets | grep "jax_ws_library" &> /dev/null
}
PKG_MYPY=(
"pantsbuild.pants.contrib.mypy"
"//contrib/mypy/src/python/pants/contrib/mypy:plugin"
"pkg_mypy_install_test"
)
function pkg_mypy_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.mypy==${version}']" \
--explain mypy &> /dev/null
}
PKG_AVRO=(
"pantsbuild.pants.contrib.avro"
"//contrib/avro/src/python/pants/contrib/avro:plugin"
"pkg_avro_install_test"
)
function pkg_avro_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.avro==${version}']" \
--explain gen | grep "avro-java" &> /dev/null
}
PKG_THRIFTY=(
"pantsbuild.pants.contrib.thrifty"
"//contrib/thrifty/src/python/pants/contrib/thrifty:plugin"
"pkg_thrifty_install_test"
)
function pkg_thrifty_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.thrifty==${version}']" \
--explain gen | grep "thrifty" &> /dev/null
}
PKG_GOOGLEJAVAFORMAT=(
"pantsbuild.pants.contrib.googlejavaformat"
"//contrib/googlejavaformat/src/python/pants/contrib/googlejavaformat:plugin"
"pkg_googlejavaformat_install_test"
)
function pkg_googlejavaformat_install_test() {
local version=$1
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.googlejavaformat==${version}']" \
--explain fmt | grep "google-java-format" &> /dev/null
execute_packaged_pants_with_internal_backends \
--plugins="['pantsbuild.pants.contrib.googlejavaformat==${version}']" \
--explain lint | grep "google-java-format" &> /dev/null
}
# Once an individual (new) package is declared above, insert it into the array below.
CONTRIB_PACKAGES=(
PKG_SCROOGE
PKG_BUILDGEN
PKG_GO
PKG_NODE
PKG_PYTHON_CHECKS
PKG_SCALAJS
PKG_FINDBUGS
PKG_CPP
PKG_CONFLUENCE
PKG_ERRORPRONE
PKG_CODEANALYSIS
PKG_JAXWS
PKG_MYPY
PKG_AVRO
PKG_THRIFTY
PKG_GOOGLEJAVAFORMAT
)
repo: foursquare/pants | path: contrib/release_packages.sh | language: Shell | license: apache-2.0 | size: 7,350
#!/bin/bash
PLATFORM=`uname -s`
PRG="$0"
PRGDIR=`dirname "$PRG"`
BASEDIR=`cd "$PRGDIR" >/dev/null; pwd`
cd $BASEDIR
echo
echo "Building library avro-c-1.7.7"
cd third-party/avro-c-1.7.7/
if [ ! -d "build" ]; then
mkdir build
fi
cd build
cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo
make
cd ../../..
echo
echo "Building library htslib"
cd third-party/htslib
make
cd ../..
echo
echo "Building library samtools"
cd third-party/samtools
make HTSDIR=../htslib
cd ../..
olib="libhpgbigdata.so"
if [[ "Darwin" == "$PLATFORM" ]]; then
olib="libhpgbigdata.dylib"
fi
echo
echo "Building the dynamic library $olib"
gcc -O3 -std=gnu99 ./converters/bam2ga.c \
    jni/org_opencb_hpg_bigdata_core_NativeSupport.c \
    -o $olib -shared -fPIC \
    -I third-party/avro-c-1.7.7/src/ \
    -I $JAVA_HOME/include -I $JAVA_HOME/include/linux -I $JAVA_HOME/include/darwin \
    -I third-party/ -I third-party/htslib/ \
    -L third-party/avro-c-1.7.7/build/src/ -L third-party/htslib/ \
    -lhts -lavro -lpthread
repo: opencb/hpg-bigdata | path: hpg-bigdata-core/native/build.sh | language: Shell | license: apache-2.0 | size: 965
#!/bin/sh
set -o errexit # Exit the script with error if any of the commands fail
# Supported/used environment variables:
# SSL Set to "yes" to enable SSL. Defaults to "nossl"
# MONGODB_URI Set the connection string to use (including credentials and topology info)
# API_VERSION Optional API_VERSION environment variable for run-tests.php
# IS_MATRIX_TESTING Set to "true" to enable matrix testing. Defaults to empty string. If "true", DRIVER_MONGODB_VERSION and MONGODB_VERSION will also be checked.
# MOCK_SERVICE_ID Set to "1" to enable service ID mocking for load balancers. Defaults to empty string.
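#
# Example invocation (hypothetical values):
#   SSL=yes MONGODB_URI="mongodb://user:pass@localhost:27017" \
#     API_VERSION=1 TESTS=versioned-api sh .evergreen/run-tests.sh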
SSL=${SSL:-nossl}
MONGODB_URI=${MONGODB_URI:-}
API_VERSION=${API_VERSION:-}
IS_MATRIX_TESTING=${IS_MATRIX_TESTING:-}
MOCK_SERVICE_ID=${MOCK_SERVICE_ID:-}
# For matrix testing, we have to determine the correct driver version
if [ "${IS_MATRIX_TESTING}" = "true" ]; then
case "${DRIVER_MONGODB_VERSION}" in
'4.4')
export EXTENSION_VERSION='1.8.2'
;;
'4.2')
export EXTENSION_VERSION='1.6.1'
;;
'4.0')
export EXTENSION_VERSION='1.5.5'
;;
esac
case "${MONGODB_VERSION}" in
latest)
MONGODB_VERSION_NUMBER='5.0'
;;
*)
MONGODB_VERSION_NUMBER=$MONGODB_VERSION
;;
esac
PHPUNIT_OPTS="--dont-report-useless-tests --exclude-group matrix-testing-exclude-server-${MONGODB_VERSION_NUMBER}-driver-${DRIVER_MONGODB_VERSION},matrix-testing-exclude-server-${MONGODB_VERSION_NUMBER}-driver-${DRIVER_MONGODB_VERSION}-topology-${TOPOLOGY}"
DIR=$(dirname $0)
. $DIR/install-dependencies.sh
fi
# Enable verbose output to see skipped and incomplete tests
PHPUNIT_OPTS="${PHPUNIT_OPTS} -v"
# For load balancer testing, we need to enable service ID mocking
if [ "${MOCK_SERVICE_ID}" = "1" ]; then
PHPUNIT_OPTS="${PHPUNIT_OPTS} -d mongodb.mock_service_id=1"
fi
# Determine if MONGODB_URI already has a query string
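# e.g. (hypothetical values) MONGODB_URI="mongodb://host/?replicaSet=rs0"
# yields SUFFIX="?replicaSet=rs0", while a URI without options yields an
# empty SUFFIX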
SUFFIX=$(echo "$MONGODB_URI" | grep -Eo "\?(.*)" | cat)
if [ "$SSL" = "yes" ]; then
if [ -z "$SUFFIX" ]; then
MONGODB_URI="${MONGODB_URI}/?ssl=true&sslallowinvalidcertificates=true"
else
MONGODB_URI="${MONGODB_URI}&ssl=true&sslallowinvalidcertificates=true"
fi
fi
echo "Running tests with URI: $MONGODB_URI"
# Disable failing PHPUnit due to deprecations
export SYMFONY_DEPRECATIONS_HELPER=999999
# Run the tests, and store the results in a Evergreen compatible JSON results file
case "$TESTS" in
atlas-data-lake*)
MONGODB_URI="mongodb://mhuser:[email protected]:27017"
php vendor/bin/simple-phpunit --configuration phpunit.evergreen.xml --testsuite "Atlas Data Lake Test Suite" $PHPUNIT_OPTS
;;
versioned-api)
php vendor/bin/simple-phpunit --configuration phpunit.evergreen.xml --group versioned-api $PHPUNIT_OPTS
;;
serverless)
php vendor/bin/simple-phpunit --configuration phpunit.evergreen.xml --group serverless $PHPUNIT_OPTS
;;
*)
php vendor/bin/simple-phpunit --configuration phpunit.evergreen.xml $PHPUNIT_OPTS
;;
esac
repo: mongodb/mongo-php-library | path: .evergreen/run-tests.sh | language: Shell | license: apache-2.0 | size: 3,110
#!/bin/bash
#******************************************************************************
# Copyright 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
echo "Please source appropriate versions of Intel Compiler (ICC) and " \
"Intel MKL, and build MKL-DNN 0.9 or later"
# source <ICC_INSTALLDIR>
# source <MKL_INSTALLDIR>
# export DNNROOT=<MKLDNN_INSTALL_DIR>
export LD_LIBRARY_PATH=$DNNROOT/lib:$LD_LIBRARY_PATH
export KMP_HW_SUBSET=1T
export KMP_AFFINITY=compact,granularity=fine
export OMP_NUM_THREADS=$(lscpu | grep 'Core(s) per socket' | awk '{print $NF}')
make clean all CONVLIB=MKLDNN || \
{ echo "*** ERROR: make failed"; exit 1; }
if lscpu | grep Flags | grep -qs avx512dq; then
./run_mkl_conv_ia_SKX.sh
elif lscpu | grep Flags | grep -qs avx512f; then
./run_mkl_conv_ia_KNL.sh
else
./run_mkl_conv_ia_generic.sh
fi
repo: rsdubtso/DeepBench | path: code/intel/convolution/mkl_conv/run_mkl_conv_ia.sh | language: Shell | license: apache-2.0 | size: 1,454
#!/bin/bash
xargs rm -rvf < installed_files.txt
repo: saurabh-hirani/icinga2_api | path: uninstall.sh | language: Shell | license: isc | size: 52
#!/bin/bash
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is an example script that starts vtctld.
set -e
cell='test'
grpc_port=15999
script_root=`dirname "${BASH_SOURCE}"`
source $script_root/env.sh
echo "Starting vtctld..."
$VTROOT/bin/vtctld \
$TOPOLOGY_FLAGS \
-cell $cell \
-web_dir $VTTOP/web/vtctld \
-web_dir2 $VTTOP/web/vtctld2/app \
-workflow_manager_init \
-workflow_manager_use_election \
-service_map 'grpc-vtctl' \
-backup_storage_implementation file \
-file_backup_storage_root $VTDATAROOT/backups \
-log_dir $VTDATAROOT/tmp \
-port $vtctld_web_port \
-grpc_port $grpc_port \
-pid_file $VTDATAROOT/tmp/vtctld.pid \
> $VTDATAROOT/tmp/vtctld.out 2>&1 &
disown -a
echo "Access vtctld web UI at http://$hostname:$vtctld_web_port"
echo "Send commands with: vtctlclient -server $hostname:$grpc_port ..."
repo: NazarethCollege/heweb2017-devops-presentation | path: sites/tweetheat/src/backend/vendor/src/github.com/youtube/vitess/examples/local/vtctld-up.sh | language: Shell | license: mit | size: 1,387
#!/bin/sh
HOME=/root
PATH=/sbin:/bin:/usr/sbin:/usr/bin
PS1="linaro-test [rc=$(echo \$?)]# "
export HOME PS1 PATH
early_setup() {
mkdir -p /proc /sys /tmp /run
mount -t proc proc /proc
mount -t sysfs sysfs /sys
mount -t devtmpfs none /dev
ln -s /run /var/run
chmod 0666 /dev/tty*
chown root:tty /dev/tty*
}
read_args() {
[ -z "$CMDLINE" ] && CMDLINE=`cat /proc/cmdline`
for arg in $CMDLINE; do
optarg=`expr "x$arg" : 'x[^=]*=\(.*\)'`
case $arg in
console=*)
tty=${arg#console=}
tty=${tty#/dev/}
case $tty in
tty[a-zA-Z]* )
port=${tty%%,*}
esac ;;
debug) set -x ;;
esac
done
}
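# For illustration (hypothetical cmdline): booting with
# "console=ttyAMA0,115200 debug" sets port=ttyAMA0 (used for the shell
# started below) and enables shell tracing via set -x.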
early_setup
read_args
setsid sh </dev/${port} >/dev/${port} 2>&1
repo: wwright2/dcim3-angstrom1 | path: sources/meta-linaro/meta-linaro/recipes-core/initrdscripts/files/init-boot-linaro.sh | language: Shell | license: mit | size: 845
#!/bin/bash
HOME_DIR=`echo $HOME | sed -e s/\\\\/$//`
ROOT_DIR=${HOME_DIR}/bin/skinny-framework
rm -rf ${ROOT_DIR}
mkdir -p ${ROOT_DIR}
SKINNY_COMMAND=${ROOT_DIR}/skinny
cd ${ROOT_DIR}
TEMPLATE_PATH=https://raw.githubusercontent.com/skinny-framework/skinny-framework/develop/yeoman-generator-skinny/app/templates
wget ${TEMPLATE_PATH}/skinny
wget ${TEMPLATE_PATH}/sbt
wget ${TEMPLATE_PATH}/sbt-debug
chmod +x *
mkdir -p bin
cd bin
wget ${TEMPLATE_PATH}/bin/sbt-launch.jar
cd -
SHELL_PROFILE=${HOME_DIR}/.bash_profile
if [[ "$SHELL" == *zsh* ]]; then
SHELL_PROFILE=${HOME_DIR}/.zprofile
fi
# append to PATH only once (grep -qs: exit status only, ignore a missing profile)
if ! grep -qs 'PATH=${PATH}:${HOME}/bin/skinny-framework' ${SHELL_PROFILE}; then
echo "PATH=\${PATH}:\${HOME}/bin/skinny-framework" >> ${SHELL_PROFILE}
fi
echo "
command installed to ${SKINNY_COMMAND}
Please execute 'source ${SHELL_PROFILE}'
"
repo: seratch/skinny-framework | path: setup.sh | language: Shell | license: mit | size: 848
#!/usr/bin/env bash
set -ex
cd `dirname $0`
VERSION=$1
if [[ "${VERSION}" == "" ]]
then
echo "Version number required"
exit 1
fi
./build.sh
echo "====== RENAMING 0.0.0-PLACEHOLDER to 2.0.0-rc.${VERSION} ======"
find ./dist/packages-dist/ -type f -name package.json -print0 | xargs -0 sed -i '' "s/0\\.0\\.0-PLACEHOLDER/2.0.0-rc.${VERSION}/g"
find ./dist/packages-dist/ -type f -name "*umd.js" -print0 | xargs -0 sed -i '' "s/0\\.0\\.0-PLACEHOLDER/2.0.0-rc.${VERSION}/g"
for PACKAGE in \
core \
compiler \
compiler-cli \
common \
http \
platform-browser \
platform-server \
router \
router-deprecated \
upgrade
do
DESTDIR=./dist/packages-dist/${PACKAGE}
echo "====== PUBLISHING: ${DESTDIR} ====="
npm publish ${DESTDIR} --access public
done
repo: alfonso-presa/angular | path: publish-packages.sh | language: Shell | license: mit | size: 777
#!/usr/bin/python ../scripts/shcov
echo Rad 1
echo Rad 2
repo: SimonKagstrom/shcov | path: tests/hash_bang.sh | language: Shell | license: gpl-2.0 | size: 58
#!/bin/sh
#
# Copyright (c) 2006, 2014 Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 6394084
# @summary Redefine class can't handle addition of 64 bit constants in JDK1.5.0_05
#
# @key intermittent
# @run shell RedefineIntConstantToLong.sh
compileOptions=-g
compileOptions2=-g
# Uncomment this to see the JDI trace
#jdbOptions=-dbgtrace
createJavaFile()
{
cat <<EOF > $1.java.1
public final class $1 {
public long m1(int i) {
long r=0;
r = m2(i * 2); // @1 commentout
// @1 uncomment r =m2(i * 2L);
return r;
}
public long m2(int j) {
System.out.println(System.getProperty("line.separator") +
"**** public long m2(int j) with value: " + j);
return j;
}
public long m2(long j) {
System.out.println(System.getProperty("line.separator") +
"**** public long m2(long j) with value: " + j);
return j;
}
public void doit() throws Exception {
long r1 = 0;
long r2;
r1 = m1(1000);
r2 = 0; // @1 breakpoint
r2 = m1(1000);
if (r1 != r2) { // @1 breakpoint
throw new Exception("FAILURE: Expected value: " + r1 + " Actual value: " + r2);
} else {
System.out.println("SUCCESS: Expected value: " + r1 + " Actual value: " + r2);
}
}
public static void main(String args[]) throws Exception {
new $1().doit();
}
}
EOF
}
# This is called to feed cmds to jdb.
dojdbCmds()
{
setBkpts @1
runToBkpt @1
redefineClass @1
setBkpts @1
contToBkpt
cmd where
cmd allowExit cont
}
mysetup()
{
if [ -z "$TESTSRC" ] ; then
TESTSRC=.
fi
for ii in . $TESTSRC $TESTSRC/.. ; do
if [ -r "$ii/ShellScaffold.sh" ] ; then
. $ii/ShellScaffold.sh
break
fi
done
}
# You could replace this next line with the contents
# of ShellScaffold.sh and this script will run just the same.
mysetup
runit
jdbFailIfPresent 'FAILURE:'
pass
repo: FauxFaux/jdk9-jdk | path: test/com/sun/jdi/RedefineIntConstantToLong.sh | language: Shell | license: gpl-2.0 | size: 3,046
#!/bin/bash
# restore_leader_if_missing.sh is a workaround for patroni not currently
# having a way to replicate a new leader from a wal-e backup.
#
# The idea is that patroni can create a replica from wal-e if there is a leader
# though I'm not sure why a leader is required. That's future work in patroni.
#
# So this script will create a fake leader to trick patroni into restoring a new
# leader from wal-e backup.
#
# It will only run the restoration process if:
# * there is no current leader,
# * there is no local DB initialized, and
# * there is a wal-e backup available
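#
# Example environment (hypothetical values, matching the checks below):
#   WALE_CMD='envdir ${WALE_ENV_DIR} wal-e --aws-instance-profile'
#   PG_DATA_DIR=/data/postgres PATRONI_SCOPE=pg-cluster-1
#   ETCD_HOST_PORT=etcd.local:4001 WAL_S3_BUCKET=my-wal-bucket
#   WALE_S3_PREFIX=s3://my-wal-bucket/backups/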
set -e # fail fast
if [[ -z $WALE_CMD ]]; then
echo "restore_leader_if_missing.sh: Requires \$WALE_CMD; e.g. envdir \${WALE_ENV_DIR} wal-e --aws-instance-profile"
exit 0
fi
if [[ -z ${PG_DATA_DIR} ]]; then
echo "restore_leader_if_missing.sh: Requires \${PG_DATA_DIR}"
exit 0
fi
if [[ -z "${PATRONI_SCOPE}" ]]; then
echo "restore_leader_if_missing.sh: Requires \$PATRONI_SCOPE to report backup-list for service"
exit 0
fi
if [[ -z "${ETCD_HOST_PORT}" ]]; then
echo "restore_leader_if_missing.sh: Requires \$ETCD_HOST_PORT (host:port) to update backup-list data to etcd"
exit 0
fi
if [[ -z "${WALE_S3_PREFIX}" ]]; then
echo "restore_leader_if_missing.sh: Requires \$WALE_S3_PREFIX into which to store sysid"
exit 0
fi
if [[ -z "${WAL_S3_BUCKET}" ]]; then
echo "restore_leader_if_missing.sh: Requires \$WAL_S3_BUCKET into which to store sysid"
exit 0
fi
indent_restore_leader() {
c="s/^/${PATRONI_SCOPE:0:6}-restore_leader_if_missing> /"
case $(uname) in
Darwin) sed -l "$c";; # mac/bsd sed: -l buffers on line boundaries
*) sed -u "$c";; # unix/gnu sed: -u unbuffered (arbitrary) chunks of data
esac
}
(
if [[ "$(curl -s ${ETCD_HOST_PORT}/v2/keys/service/${PATRONI_SCOPE}/leader | jq -r .node.value)" != "null" ]]; then
echo "leader exists, no additional preparation required for container to join cluster"
exit 0
fi
if [[ -d ${PG_DATA_DIR}/global ]]; then
echo "local database exists; no additional preparation required to restart container"
exit 0
fi
BACKUPS_LINES=$($WALE_CMD backup-list 2>/dev/null|wc -l)
if [[ $BACKUPS_LINES -lt 2 ]]; then
echo "new cluster, no existing backup to restore"
exit 0
fi
# must have /initialize set
if [[ "$(curl -s ${ETCD_HOST_PORT}/v2/keys/service/${PATRONI_SCOPE}/initialize | jq -r .node.value)" == "null" ]]; then
echo "etcd missing /initialize system ID, fetching from ${WALE_S3_PREFIX}sysids"
region=$(aws s3api get-bucket-location --bucket ${WAL_S3_BUCKET} | jq -r '.LocationConstraint')
if [[ ${region} != 'null' ]]; then
region_option="--region ${region}"
fi
aws s3 ${region_option} sync ${WALE_S3_PREFIX}sysids /tmp/sysids
if [[ ! -f /tmp/sysids/sysid ]]; then
echo "Target ${WALE_S3_PREFIX} missing /sysids/sysid for original 'Database system identifier'"
exit 1
fi
echo "Re-initializing /${PATRONI_SCOPE}/initialize with original 'Database system identifier'"
curl -s ${ETCD_HOST_PORT}/v2/keys/service/${PATRONI_SCOPE}/initialize -XPUT -d "value=$(cat /tmp/sysids/sysid)"
fi
echo "preparing patroni to restore this container from wal-e backups"
) 2>&1 | indent_restore_leader
repo: dingotiles/dingo-postgresql-release | path: images/postgresql96-patroni/scripts/postgres/restore_leader_if_missing.sh | language: Shell | license: apache-2.0 | size: 3,260
#!/bin/bash
# Copyright 2014 Yajie Miao Carnegie Mellon University Apache 2.0
# This script trains Maxout Network models over fMLLR features. It is to be
# run after run.sh. Before running this, you should already build the initial
# GMM model. This script requires a GPU, and also the "pdnn" toolkit to train
# the DNN.
# We implement the <Maxout> activation function, based on Kaldi "revision 4960".
# Please follow these steps:
# 1. Go to /path/to/kaldi/src/nnet and *backup* nnet-component.h, nnet-component.cc, nnet-activation.h
# 2. Download these 3 files from here:
# http://www.cs.cmu.edu/~ymiao/codes/kaldipdnn/nnet-component.h
# http://www.cs.cmu.edu/~ymiao/codes/kaldipdnn/nnet-component.cc
# http://www.cs.cmu.edu/~ymiao/codes/kaldipdnn/nnet-activation.h
# 3. Recompile Kaldi
# For more information regarding the recipes and results, visit the website
# http://www.cs.cmu.edu/~ymiao/kaldipdnn
working_dir=exp_pdnn/dnn_maxout
gmmdir=exp/tri3
# Specify the gpu device to be used
gpu=gpu
cmd=run.pl
. cmd.sh
[ -f path.sh ] && . ./path.sh
. parse_options.sh || exit 1;
# At this point you may want to make sure the directory $working_dir is
# somewhere with a lot of space, preferably on the local GPU-containing machine.
if [ ! -d pdnn ]; then
echo "Checking out PDNN code."
svn co https://github.com/yajiemiao/pdnn/trunk pdnn
fi
if [ ! -d steps_pdnn ]; then
echo "Checking out steps_pdnn scripts."
svn co https://github.com/yajiemiao/kaldipdnn/trunk/steps_pdnn steps_pdnn
fi
if ! nvidia-smi; then
echo "The command nvidia-smi was not found: this probably means you don't have a GPU."
echo "(Note: this script might still work, it would just be slower.)"
fi
# The hope here is that Theano has been installed either to python or to python2.6
pythonCMD=python
if ! python -c 'import theano;'; then
if ! python2.6 -c 'import theano;'; then
echo "Theano does not seem to be installed on your machine. Not continuing."
echo "(Note: this script might still work, it would just be slower.)"
exit 1;
else
pythonCMD=python2.6
fi
fi
mkdir -p $working_dir/log
! gmm-info $gmmdir/final.mdl >&/dev/null && \
echo "Error getting GMM info from $gmmdir/final.mdl" && exit 1;
num_pdfs=`gmm-info $gmmdir/final.mdl | grep pdfs | awk '{print $NF}'` || exit 1;
echo =====================================================================
echo " Data Split & Alignment & Feature Preparation "
echo =====================================================================
# Split training data into training and cross-validation sets for DNN
if [ ! -d data/train_tr95 ]; then
utils/subset_data_dir_tr_cv.sh --cv-spk-percent 5 data/train data/train_tr95 data/train_cv05 || exit 1
fi
# Alignment on the training and validation data
for set in tr95 cv05; do
if [ ! -d ${gmmdir}_ali_$set ]; then
steps/align_fmllr.sh --nj 16 --cmd "$train_cmd" \
data/train_$set data/lang $gmmdir ${gmmdir}_ali_$set || exit 1
fi
done
# Dump fMLLR features. "Fake" cmvn stats (zero means and unit variances) are applied.
for set in tr95 cv05; do
if [ ! -d $working_dir/data/train_$set ]; then
steps/nnet/make_fmllr_feats.sh --nj 16 --cmd "$train_cmd" \
--transform-dir ${gmmdir}_ali_$set \
$working_dir/data/train_$set data/train_$set $gmmdir $working_dir/_log $working_dir/_fmllr || exit 1
steps/compute_cmvn_stats.sh --fake \
$working_dir/data/train_$set $working_dir/_log $working_dir/_fmllr || exit 1;
fi
done
for set in dev test; do
if [ ! -d $working_dir/data/$set ]; then
steps/nnet/make_fmllr_feats.sh --nj 10 --cmd "$train_cmd" \
--transform-dir $gmmdir/decode_$set \
$working_dir/data/$set data/$set $gmmdir $working_dir/_log $working_dir/_fmllr || exit 1
steps/compute_cmvn_stats.sh --fake \
$working_dir/data/$set $working_dir/_log $working_dir/_fmllr || exit 1;
fi
done
echo =====================================================================
echo " Training and Cross-Validation Pfiles "
echo =====================================================================
# By default, DNN inputs include 11 frames of fMLLR
for set in tr95 cv05; do
if [ ! -f $working_dir/${set}.pfile.done ]; then
steps_pdnn/build_nnet_pfile.sh --cmd "$train_cmd" --norm-vars false \
--splice-opts "--left-context=5 --right-context=5" \
$working_dir/data/train_$set ${gmmdir}_ali_$set $working_dir || exit 1
( cd $working_dir; mv concat.pfile ${set}.pfile; gzip ${set}.pfile; )
touch $working_dir/${set}.pfile.done
fi
done
# Rename pfiles to keep consistency
( cd $working_dir;
ln -s tr95.pfile.gz train.pfile.gz; ln -s cv05.pfile.gz valid.pfile.gz
)
echo =====================================================================
echo " DNN Pre-training & Fine-tuning "
echo =====================================================================
# Here we use maxout networks. When using maxout, we need to reduce the learning rate. To apply dropout,
# add "--dropout-factor 0.2,0.2,0.2,0.2" and change the value of "--lrate" to "D:0.1:0.5:0.2,0.2:8"
# Check run_timit/RESULTS for the results
# The network structure is set in the way that this maxout network has approximately the same number of
# parameters as the DNN model in run-dnn.sh
feat_dim=$(gunzip -c $working_dir/train.pfile.gz |head |grep num_features| awk '{print $2}') || exit 1;
if [ ! -f $working_dir/dnn.fine.done ]; then
echo "Fine-tuning DNN"
$cmd $working_dir/log/dnn.fine.log \
export PYTHONPATH=$PYTHONPATH:`pwd`/pdnn/ \; \
export THEANO_FLAGS=mode=FAST_RUN,device=$gpu,floatX=float32 \; \
$pythonCMD pdnn/cmds/run_DNN.py --train-data "$working_dir/train.pfile.gz,partition=1000m,random=true,stream=false" \
--valid-data "$working_dir/valid.pfile.gz,partition=200m,random=true,stream=false" \
--nnet-spec "$feat_dim:625:625:625:625:$num_pdfs" \
--activation "maxout:3" \
--lrate "D:0.008:0.5:0.2,0.2:8" \
--wdir $working_dir --kaldi-output-file $working_dir/dnn.nnet || exit 1;
touch $working_dir/dnn.fine.done
fi
echo =====================================================================
echo " Decoding "
echo =====================================================================
if [ ! -f $working_dir/decode.done ]; then
cp $gmmdir/final.mdl $working_dir || exit 1; # copy final.mdl for scoring
graph_dir=$gmmdir/graph
steps_pdnn/decode_dnn.sh --nj 12 --scoring-opts "--min-lmwt 1 --max-lmwt 8" --cmd "$decode_cmd" \
$graph_dir $working_dir/data/dev ${gmmdir}_ali_tr95 $working_dir/decode_dev || exit 1;
steps_pdnn/decode_dnn.sh --nj 12 --scoring-opts "--min-lmwt 1 --max-lmwt 8" --cmd "$decode_cmd" \
$graph_dir $working_dir/data/test ${gmmdir}_ali_tr95 $working_dir/decode_test || exit 1;
touch $working_dir/decode.done
fi
echo "Finish !!"
repo: weiwchu/kaldipdnn | path: run_timit/run-dnn-maxout.sh | language: Shell | license: apache-2.0 | size: 7,065
#!/bin/bash
set -x
env
# Download maven 3 if the system maven isn't maven 3
VERSION=`mvn -v | grep "Apache Maven 3"`
if [ -z "${VERSION}" ]; then
curl http://archive.apache.org/dist/maven/binaries/apache-maven-3.2.1-bin.tar.gz > apache-maven-3.2.1-bin.tar.gz
tar -xvzf apache-maven-3.2.1-bin.tar.gz
MVN=${PWD}/apache-maven-3.2.1/bin/mvn
else
MVN=mvn
fi
# Get the expected common version
COMMON_VERSION=$1
# Get rid of the version argument
shift
# Get rid of the java property name containing the args
shift
RUN_BUILD=false
for ARG in $*; do
if [ "$ARG" = "package" ]; then
RUN_BUILD=true
fi
if [ "$ARG" = "install" ]; then
RUN_BUILD=true
fi
done
if [ $RUN_BUILD = "true" ]; then
if [ ! -z "$ZUUL_BRANCH" ]; then
BRANCH=${ZUUL_BRANCH}
else
BRANCH=${ZUUL_REF}
fi
( cd common; ./build_common.sh ${MVN} ${COMMON_VERSION} ${BRANCH} )
RC=$?
if [ $RC != 0 ]; then
exit $RC
fi
fi
# Invoke the maven 3 on the real pom.xml
( cd java; ${MVN} -DgitRevision=`git rev-list HEAD --max-count 1 --abbrev=0 --abbrev-commit` $* )
RC=$?
# Copy the jars where the publisher will find them
if [ $RUN_BUILD = "true" ]; then
if [ ! -L target ]; then
ln -sf java/target target
fi
fi
rm -fr apache-maven-3.2.1*
exit $RC
repo: stackforge/monasca-api | path: run_maven.sh | language: Shell | license: apache-2.0 | size: 1,305
#!/bin/bash
# Copyright 2018 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This assumes it is being run inside a docker container of gold-karma-chrome-tests
# and a Skia checkout has been mounted at /SRC and the output directory
# is mounted at /OUT
# For example:
# docker run -v $SKIA_ROOT:/SRC -v /tmp/dockerout:/OUT gcr.io/skia-public/gold-karma-chrome-tests:72.0.3626.121_v1 /SRC/infra/canvaskit/test_canvaskit.sh
set -ex
#BASE_DIR is the dir this script is in ($SKIA_ROOT/infra/canvaskit)
BASE_DIR=`cd $(dirname ${BASH_SOURCE[0]}) && pwd`
CANVASKIT_DIR=$BASE_DIR/../../modules/canvaskit
# Start the aggregator in the background
/opt/gold-aggregator $@ &
# Run the tests
npx karma start $CANVASKIT_DIR/karma.conf.js --single-run
# Tell the aggregator to dump the json
# This curl command gets the HTTP code and stores it into $CODE
CODE=`curl -s -o /dev/null -I -w "%{http_code}" -X POST localhost:8081/dump_json`
if [ $CODE -ne 200 ]; then
# If we don't get 200 back, something is wrong with writing to disk, so exit with error
exit 1
fi
repo: youtube/cobalt | path: third_party/skia/infra/canvaskit/test_canvaskit.sh | language: Shell | license: bsd-3-clause | size: 1,128
#!/bin/sh
# $FreeBSD$
. `dirname $0`/conf.sh
echo "1..1"
us0=$(attach_md -t malloc -s 1M) || exit 1
us1=$(attach_md -t malloc -s 2M) || exit 1
us2=$(attach_md -t malloc -s 3M) || exit 1
gstripe create -s 16384 $name /dev/$us0 /dev/$us1 /dev/$us2 || exit 1
devwait
# Size of created device should be 1MB * 3.
size=`diskinfo /dev/stripe/${name} | awk '{print $3}'`
if [ $size -eq 3145728 ]; then
echo "ok 1"
else
echo "not ok 1"
fi
repo: TigerBSD/TigerBSD | path: FreeBSD/tests/sys/geom/class/stripe/1_test.sh | language: Shell | license: isc | size: 439
#!/usr/bin/env bash
usage()
{
echo "Publishes the NuGet packages to the specified location."
echo "For publishing to Azure the following properties are required."
echo " /p:CloudDropAccountName=\"account name\""
echo " /p:CloudDropAccessToken=\"access token\""
echo " /p:__BuildType=\"Configuration\""
echo " /p:__BuildArch=\"Architecture\""
echo "Configuration can be Release, Checked, or Debug"
echo "Architecture can be x64, x86, arm, or arm64"
exit 1
}
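# Example invocation (hypothetical account values):
#   ./publish-packages.sh /p:CloudDropAccountName="myaccount" \
#     /p:CloudDropAccessToken="token" /p:__BuildType="Release" /p:__BuildArch="x64"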
working_tree_root="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
publish_log=$working_tree_root/publish.log
# Use uname to determine what the OS is.
OSName=$(uname -s)
case $OSName in
Linux)
__BuildOS=Linux
;;
Darwin)
__BuildOS=OSX
;;
FreeBSD)
__BuildOS=FreeBSD
;;
OpenBSD)
__BuildOS=OpenBSD
;;
NetBSD)
__BuildOS=NetBSD
;;
SunOS)
__BuildOS=SunOS
;;
*)
echo "Unsupported OS $OSName detected, configuring as if for Linux"
__BuildOS=Linux
;;
esac
options="/nologo /v:minimal /flp:v=detailed;Append;LogFile=$publish_log"
echo "Running publish-packages.sh $*" > $publish_log
echo "Running init-tools.sh"
$working_tree_root/init-tools.sh
echo "Publishing packages..."
echo -e "\n$working_tree_root/Tools/corerun $working_tree_root/Tools/MSBuild.exe $working_tree_root/src/publish.proj $options $*" /p:__BuildOS=$__BuildOS >> $publish_log
$working_tree_root/Tools/corerun $working_tree_root/Tools/MSBuild.exe $working_tree_root/src/publish.proj $options $* /p:__BuildOS=$__BuildOS
if [ $? -ne 0 ]
then
echo -e "\nPackage publishing failed. Aborting." >> $publish_log
echo "ERROR: An error occurred while publishing packages; see $publish_log for more details. There may have been networking problems, so please try again in a few minutes."
exit 1
fi
echo "Publish completed successfully."
echo -e "\nPublish completed successfully." >> $publish_log
exit 0
repo: LLITCHEV/coreclr | path: publish-packages.sh | language: Shell | license: mit | size: 2,028
#!/bin/sh
#Format floppy disks
#Copyright (c) Barry Kauler 2004 www.goosee.com/puppy
#2007 Lesser GPL licence v2 (http://www.fsf.org/licensing/licenses/lgpl.html)
#130517 BK: code improved and internationalized.
export TEXTDOMAIN=floppy-format
export OUTPUT_CHARSET=UTF-8
zapfloppy()
{
# Puppy will only allow 1440, 1680K and 1760K capacities.
ERR0=1
while [ $ERR0 -ne 0 ];do
pwMSG="$(gettext 'Low-level formatting disk with this capacity:') ${1} Kbyte
$(gettext 'Please wait...')"
/usr/lib/gtkdialog/box_splash -close never -fontsize large -text "${pwMSG}" &
pwID=$!
fdformat /dev/fd0u$1
ERR0=$?
sync
#killall xmessage
pupkill $pwID
if [ $ERR0 -ne 0 ];then
xmessage -bg "#ffe0e0" -name "loformat" -title "$(gettext 'Puppy Low-level Formatter')" -center -buttons "$(gettext 'Try again')":20,"$(gettext 'QUIT')":10 -file -<<XMSG
$(gettext 'ERROR low-level formatting disk.')
$(gettext 'Is the write-protect tab closed?')
XMSG
AN0=$?
if [ $AN0 -eq 10 ];then
ERR0=0
fi
if [ $AN0 -eq 0 ];then
ERR0=0
fi
if [ $AN0 -eq 1 ];then
ERR0=0
fi
else
INTROMSG="
$(gettext '\ZbSUCCESS!\ZB')
$(gettext 'Now you should press the \ZbMsdos/vfat filesystem\ZB button.')"
fi
done
}
fsfloppy()
{
echo "$(gettext 'Creating msdos filesystem on the disk...')"
ERR1=1
while [ ! $ERR1 -eq 0 ];do
pwMSG="$(gettext 'Creating msdos/vfat filesystem on floppy disk')
$(gettext 'Please wait...')"
/usr/lib/gtkdialog/box_splash -close never -fontsize large -text "${pwMSG}" &
pwID=$!
mkfs.msdos -c /dev/fd0u$1
#mformat -f $1 a:
#mbadblocks a:
ERR1=$?
#killall xmessage
pupkill $pwID
if [ $ERR1 -ne 0 ];then
xmessage -bg "#ffe0e0" -name "msdosvfat" -title "$(gettext 'Floppy msdos/vfat filesystem')" -center \
-buttons "$(gettext 'Try again')":20,"$(gettext 'QUIT')":10 -file -<<XMSG
$(gettext 'ERROR creating msdos/vfat filesystem on the floppy disk.')
$(gettext 'Is the write-protect tab closed?')
XMSG
AN0=$?
if [ $AN0 -eq 10 ];then
ERR1=0
fi
if [ $AN0 -eq 0 ];then
ERR1=0
fi
if [ $AN0 -eq 1 ];then
ERR1=0
fi
else
INTROMSG="
$(gettext '\ZbSUCCESS!\ZB')
$(gettext 'The floppy disk is now ready to be used. Use the Puppy Drive Mounter to mount it. Or, click the floppy-disk icon on the desktop.')
$(gettext 'First though, press \ZbEXIT\ZB to get out of here...')"
fi
done
sync
echo "$(gettext '...done.')"
echo " "
}
INTROMSG="$(gettext '\ZbWELCOME!\ZB')
$(gettext 'The Puppy Floppy Formatter only formats floppies with 1440 Kbyte capacity and with the msdos/vfat filesystem, for interchangeability with Windows.')
$(gettext 'You only need to low-level format if the disk is formatted with some other capacity, or it is corrupted. You do not have to low-level format a new disk, but may do so to check its integrity.')
$(gettext 'A disk is NOT usable if it is only low-level formatted: it also must have a filesystem, so this must always be the second step.')
$(gettext 'Doing step-2 only, that is, creating a filesystem on a disk, is also a method for wiping any existing files.')"
#big loop...
while :; do
MNTDMSG=" "
mount | grep "/dev/fd0" > /dev/null 2>&1
if [ $? -eq 0 ];then #=0 if string found
CURRENTMNT="`mount | grep "/dev/fd0" | cut -f 3 -d ' '`"
#this tells Rox to close any window with this directory and subdirectories open...
rox -D "$CURRENTMNT"
sync
umount "$CURRENTMNT" #/mnt/floppy
if [ $? -ne 0 ];then
MNTDMSG="
$(gettext 'Puppy found a floppy disk already mounted in the drive, but is not able to unmount it. The disk must be unmounted now. Please click the \Zbclose box\ZB on the floppy-disk icon on the desktop, or use the Puppy Drive Mounter (click \Zbmount\ZB icon at top of screen) to unmount the floppy disk. DO THIS FIRST!')"
else
MNTDMSG="
$(gettext 'Puppy found that the floppy disk was mounted, but has now unmounted it. Now ok to format disk.')"
fi
fi
pressMSG="$(gettext 'Press a button:')"
pupdialog --colors --background '#e0ffe0' --title "$(gettext 'Puppy Floppy Formatter')" --extra-button --yes-label "$(gettext 'Low-level Format')" --no-label "$(gettext 'EXIT')" --extra-label "$(gettext 'Msdos/vfat filesystem')" --yesno "${INTROMSG}
${MNTDMSG}
${pressMSG}" 0 0
ANS=$?
case $ANS in
0) #low-level format
zapfloppy 1440
;;
3) #vfat
fsfloppy 1440
;;
1) #exit
break
;;
*)
break
;;
esac
done
###END###
repo: ninaholic/woof-CE | path: woof-code/rootfs-skeleton/usr/sbin/floppy-format.sh | language: Shell | license: gpl-2.0 | size: 4,385
#!/bin/sh
# confirm that 'mv symlink symlink' doesn't remove symlink
# Based on an example from David Luyer.
# Copyright (C) 2001-2014 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ mv
touch file || framework_failure_
ln -s file s || framework_failure_
# This must fail.
mv s s 2> /dev/null && fail=1
# But the symlink, s, must not be removed.
# Before 4.0.36, 's' would have been removed.
test -f s || fail=1
Exit $fail
repo: houwentaoff/coreutils | path: tests/mv/into-self-4.sh | language: Shell | license: gpl-3.0 | size: 1,107
#!/bin/bash
#
# Contains the main cross compiler, that individually sets up each target build
# platform, compiles all the C dependencies, then build the requested executable
# itself.
#
# Usage: build.sh <import path>
#
# Needed environment variables:
# REPO_REMOTE - Optional VCS remote if not the primary repository is needed
# REPO_BRANCH - Optional VCS branch to use, if not the master branch
# DEPS - Optional list of C dependency packages to build
# PACK - Optional sub-package, if not the import path is being built
# OUT - Optional output prefix to override the package name
# FLAG_V - Optional verbosity flag to set on the Go builder
# FLAG_RACE - Optional race flag to set on the Go builder
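#
# Example invocation (hypothetical dependency URL and flags):
#   REPO_BRANCH=1.0 DEPS="https://example.org/libfoo.tar.gz" FLAG_V=true \
#     ./build.sh github.com/elastic/beats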
# Download the canonical import path (may fail, don't allow failures beyond)
if [ $1 = "github.com/elastic/beats" ]; then
SRC_FOLDER=$SOURCE
DST_FOLDER=$GOPATH/src/$1
WORKING_DIRECTORY=$GOPATH/src/$1
else
SRC_FOLDER=$SOURCE
DST_FOLDER=$GOPATH/src/$1
WORKING_DIRECTORY=$GOPATH/src/`dirname $1`
fi
if [ "$SOURCE" != "" ]; then
mkdir -p ${DST_FOLDER}
echo "Copying main source folder ${SRC_FOLDER} to folder ${DST_FOLDER}"
rsync --exclude ".git" --exclude "build/" -a ${SRC_FOLDER}/ ${DST_FOLDER}
else
mkdir -p $GOPATH/src/`dirname $1`
cd $GOPATH/src/`dirname $1`
echo "Fetching main git repository $1 in folder $GOPATH/src/`dirname $1`"
git clone https://$1.git
fi
set -e
cd $WORKING_DIRECTORY
export GOPATH=$GOPATH:`pwd`/Godeps/_workspace
export GO15VENDOREXPERIMENT=1
# Switch over the code-base to another checkout if requested
if [ "$REPO_REMOTE" != "" ]; then
echo "Switching over to remote $REPO_REMOTE..."
if [ -d ".git" ]; then
git remote set-url origin $REPO_REMOTE
git pull
elif [ -d ".hg" ]; then
echo -e "[paths]\ndefault = $REPO_REMOTE\n" >> .hg/hgrc
hg pull
fi
fi
if [ "$REPO_BRANCH" != "" ]; then
echo "Switching over to branch $REPO_BRANCH..."
if [ -d ".git" ]; then
git checkout $REPO_BRANCH
elif [ -d ".hg" ]; then
hg checkout $REPO_BRANCH
fi
fi
# Download all the C dependencies
echo "Fetching dependencies..."
mkdir -p /deps
DEPS=($DEPS) && for dep in "${DEPS[@]}"; do
echo Downloading $dep
if [ "${dep##*.}" == "tar" ]; then wget -q $dep -O - | tar -C /deps -x; fi
if [ "${dep##*.}" == "gz" ]; then wget -q $dep -O - | tar -C /deps -xz; fi
if [ "${dep##*.}" == "bz2" ]; then wget -q $dep -O - | tar -C /deps -xj; fi
done
# Configure some global build parameters
NAME=`basename $1/$PACK`
if [ "$OUT" != "" ]; then
NAME=$OUT
fi
if [ "$FLAG_V" == "true" ]; then V=-v; fi
if [ "$FLAG_RACE" == "true" ]; then R=-race; fi
if [ "$STATIC" == "true" ]; then LDARGS=--ldflags\ \'-extldflags\ \"-static\"\'; fi
if [ -n "$BEFORE_BUILD" ]; then
chmod +x /scripts/$BEFORE_BUILD
echo "Execute /scripts/$BEFORE_BUILD ${1}"
/scripts/$BEFORE_BUILD ${1}
fi
# Build for each platform individually
echo "Compiling for linux/amd64..."
HOST=x86_64-linux PREFIX=/usr/local $BUILD_DEPS /deps
GOOS=linux GOARCH=amd64 CGO_ENABLED=1 go get -d ./$PACK
sh -c "GOOS=linux GOARCH=amd64 CGO_ENABLED=1 go build $V $R $LDARGS -o $NAME-linux-amd64$R ./$PACK"
echo "Compiling for linux/386..."
HOST=i686-linux PREFIX=/usr/local $BUILD_DEPS /deps
GOOS=linux GOARCH=386 CGO_ENABLED=1 go get -d ./$PACK
sh -c "GOOS=linux GOARCH=386 CGO_ENABLED=1 go build $V $LDARGS -o $NAME-linux-386 ./$PACK"
#echo "Compiling for linux/arm..."
#CC=arm-linux-gnueabi-gcc HOST=arm-linux PREFIX=/usr/local/arm $BUILD_DEPS /deps
#CC=arm-linux-gnueabi-gcc GOOS=linux GOARCH=arm CGO_ENABLED=1 GOARM=5 go get -d ./$PACK
#CC=arm-linux-gnueabi-gcc GOOS=linux GOARCH=arm CGO_ENABLED=1 GOARM=5 go build $V -o $NAME-linux-arm ./$PACK
#echo "Compiling for windows/amd64..."
#CC=x86_64-w64-mingw32-gcc HOST=x86_64-w64-mingw32 PREFIX=/usr/x86_64-w64-mingw32 $BUILD_DEPS /deps
#CC=x86_64-w64-mingw32-gcc GOOS=windows GOARCH=amd64 CGO_ENABLED=1 go get -d ./$PACK
#CC=x86_64-w64-mingw32-gcc GOOS=windows GOARCH=amd64 CGO_ENABLED=1 go build $V $R -o $NAME-windows-amd64$R.exe ./$PACK
#echo "Compiling for windows/386..."
#CC=i686-w64-mingw32-gcc HOST=i686-w64-mingw32 PREFIX=/usr/i686-w64-mingw32 $BUILD_DEPS /deps
#CC=i686-w64-mingw32-gcc GOOS=windows GOARCH=386 CGO_ENABLED=1 go get -d ./$PACK
#CC=i686-w64-mingw32-gcc GOOS=windows GOARCH=386 CGO_ENABLED=1 go build $V -o $NAME-windows-386.exe ./$PACK
#echo "Compiling for darwin/amd64..."
#CC=o64-clang HOST=x86_64-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps
#CC=o64-clang GOOS=darwin GOARCH=amd64 CGO_ENABLED=1 go get -d ./$PACK
#CC=o64-clang GOOS=darwin GOARCH=amd64 CGO_ENABLED=1 go build $V $R -o $NAME-darwin-amd64$R ./$PACK
#echo "Compiling for darwin/386..."
#CC=o32-clang HOST=i386-apple-darwin10 PREFIX=/usr/local $BUILD_DEPS /deps
#CC=o32-clang GOOS=darwin GOARCH=386 CGO_ENABLED=1 go get -d ./$PACK
#CC=o32-clang GOOS=darwin GOARCH=386 CGO_ENABLED=1 go build $V -o $NAME-darwin-386 ./$PACK
echo "Moving binaries to host..."
cp `ls -t | head -n 2` /build
repo: yapdns/yapdnsbeat | path: vendor/github.com/elastic/beats/dev-tools/packer/docker/xgo-image-deb6/base/build.sh | language: Shell | license: mit | size: 5,092
#! /bin/sh
rm -f config.cache
#Check if the autoreconf command is available, and use that if so.
if command -v autoreconf >/dev/null 2>&1 ; then
echo autoreconf...
autoreconf --install
else
if test -d /usr/local/share/aclocal ; then
ACLOCAL_FLAGS="$ACLOCAL_FLAGS -I /usr/local/share/aclocal"
fi
(command -v aclocal) < /dev/null > /dev/null 2>&1 || {
echo aclocal not found
exit 1
}
echo aclocal...
aclocal -I m4 $ACLOCAL_FLAGS
#The GNU libtoolize is called 'glibtoolize' on Darwin.
if [ "`echo $OSTYPE | grep darwin`" != "" ] ; then
LIBTOOLIZE="glibtoolize"
else
LIBTOOLIZE="libtoolize"
fi
(command -v $LIBTOOLIZE) < /dev/null > /dev/null 2>&1 || {
echo $LIBTOOLIZE not found
exit 1
}
echo $LIBTOOLIZE...
$LIBTOOLIZE --force --copy
(command -v autoheader) < /dev/null > /dev/null 2>&1 || {
echo autoheader not found
exit 1
}
echo autoheader...
autoheader
(command -v automake) < /dev/null > /dev/null 2>&1 || {
echo automake not found
exit 1
}
echo automake...
automake --gnu --add-missing --copy
(command -v autoconf) < /dev/null > /dev/null 2>&1 || {
echo autoconf not found
exit 1
}
echo autoconf...
autoconf
fi
repo: ytaben/cyphesis | path: autogen.sh | language: Shell | license: gpl-2.0 | size: 1,251
#!/bin/sh
# Verify that id -G prints the right group when run set-GID.
# Copyright (C) 2012-2013 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ id
require_root_
g=$(id -g $NON_ROOT_USERNAME) || framework_failure_
# Construct a different group number.
gp1=$(expr $g + 1)
echo $gp1 > exp || framework_failure_
setuidgid -g $gp1 $NON_ROOT_USERNAME env PATH="$PATH" id -G > out || fail=1
compare exp out || fail=1
# With coreutils-8.16 and earlier, id -G would print both: $gp1 $g
Exit $fail
repo: bu2/coreutils | path: tests/misc/id-setgid.sh | language: Shell | license: gpl-3.0 | size: 1,174
#!/bin/sh
# test diagnostics are printed immediately when seeking beyond device.
# Copyright (C) 2008-2016 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ dd
# need write access to local device
# (even though we don't actually write anything)
require_root_
require_local_dir_
get_device_size() {
BLOCKDEV=blockdev
$BLOCKDEV -V >/dev/null 2>&1 || BLOCKDEV=/sbin/blockdev
$BLOCKDEV --getsize64 "$1"
}
# Get path to device the current dir is on.
# Note df can only get fs size, not device size.
device=$(df --output=source . | tail -n1) || framework_failure_
dev_size=$(get_device_size "$device") ||
skip_ "failed to determine size of $device"
# Don't use shell arithmetic as older versions of dash use longs
DEV_OFLOW=$(expr $dev_size + 1) ||
skip_ "failed to adjust device size $dev_size"
timeout 10 dd bs=1 skip=$DEV_OFLOW count=0 status=noxfer < "$device" 2> err
test "$?" = "1" || fail=1
echo "dd: 'standard input': cannot skip: Invalid argument
0+0 records in
0+0 records out" > err_ok || framework_failure_
compare err_ok err || fail=1
timeout 10 dd bs=1 seek=$DEV_OFLOW count=0 status=noxfer > "$device" 2> err
test "$?" = "1" || fail=1
echo "dd: 'standard output': cannot seek: Invalid argument
0+0 records in
0+0 records out" > err_ok || framework_failure_
compare err_ok err || fail=1
Exit $fail
repo: sbu-fsl/tc-coreutils | path: tests/dd/skip-seek-past-dev.sh | language: Shell | license: gpl-3.0 | size: 2,003
#!/bin/sh -ex
rm -f nlopt-2.3.tar.gz
curl -OL http://ab-initio.mit.edu/nlopt/nlopt-2.3.tar.gz
tar -zxvf nlopt-2.3.tar.gz
cd nlopt-2.3
env CFLAGS="-fPIC" ./configure && make
sudo make install
cd ..
rm -rf nlopt-2.3
rm -f libsrs2-1.0.18.tar.gz
curl -OL http://www.libsrs2.org/srs/libsrs2-1.0.18.tar.gz
tar -zxvf libsrs2-1.0.18.tar.gz
cd libsrs2-1.0.18
./configure && make
sudo make install
cd ..
rm -rf libsrs2-1.0.18
repo: avsm/opam-ppa | path: jenkins/10-opam-source-install.sh | language: Shell | license: gpl-3.0 | size: 418
#!/bin/bash
set -eEuo pipefail
# wait for bootstrap to apply config entries
wait_for_config_entry proxy-defaults global
wait_for_config_entry service-resolver s2
gen_envoy_bootstrap s1 19000 primary
gen_envoy_bootstrap s2 19001 primary
gen_envoy_bootstrap s3 19002 primary
repo: scalp42/consul | path: test/integration/connect/envoy/case-cfg-resolver-svc-redirect-tcp/setup.sh | language: Shell | license: mpl-2.0 | size: 275
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
################################################################################
# This end-to-end test verifies that manually taking a savepoint of a running
# job and resuming from it works properly. It allows resuming the job with
# a different parallelism than the original execution.
#
# Using the general purpose DataStream job, the test covers savepointing and
# resuming when using different state backends (file, RocksDB), as well as the
# following types of states:
# - Operator re-partitionable list state
# - Broadcast state
# - Union state
# - Keyed state (ValueState)
#
# The general purpose DataStream job is self-verifiable, such that if any
# unexpected error occurs during savepoints or restores, exceptions will be
# thrown; if exactly-once is violated, alerts will be sent to output (and
# caught by the test script to fail the job).
################################################################################
if [ -z "$1" ] || [ -z "$2" ]; then
echo "Usage: ./test_resume_savepoint.sh <original_dop> <new_dop> <state_backend_setting> <state_backend_file_async_setting>"
exit 1
fi
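# Example invocation (hypothetical values): resume with halved parallelism
# on the RocksDB state backend:
#   ./test_resume_savepoint.sh 4 2 rocksdb true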
source "$(dirname "$0")"/common.sh
ORIGINAL_DOP=$1
NEW_DOP=$2
STATE_BACKEND_TYPE=${3:-file}
STATE_BACKEND_FILE_ASYNC=${4:-true}
STATE_BACKEND_ROCKS_TIMER_SERVICE_TYPE=${5:-rocks}
run_resume_savepoint_test() {
if (( $ORIGINAL_DOP >= $NEW_DOP )); then
NUM_SLOTS=$ORIGINAL_DOP
else
NUM_SLOTS=$NEW_DOP
fi
set_config_key "taskmanager.numberOfTaskSlots" "${NUM_SLOTS}"
if [ $STATE_BACKEND_ROCKS_TIMER_SERVICE_TYPE == 'heap' ]; then
set_config_key "state.backend.rocksdb.timer-service.factory" "heap"
fi
set_config_key "metrics.fetcher.update-interval" "2000"
setup_flink_slf4j_metric_reporter
start_cluster
CHECKPOINT_DIR="file://$TEST_DATA_DIR/savepoint-e2e-test-chckpt-dir"
# run the DataStream allroundjob
TEST_PROGRAM_JAR=${END_TO_END_DIR}/flink-datastream-allround-test/target/DataStreamAllroundTestProgram.jar
DATASTREAM_JOB=$($FLINK_DIR/bin/flink run -d -p $ORIGINAL_DOP $TEST_PROGRAM_JAR \
--test.semantics exactly-once \
--environment.parallelism $ORIGINAL_DOP \
--state_backend $STATE_BACKEND_TYPE \
--state_backend.checkpoint_directory $CHECKPOINT_DIR \
--state_backend.file.async $STATE_BACKEND_FILE_ASYNC \
--sequence_generator_source.sleep_time 30 \
--sequence_generator_source.sleep_after_elements 1 \
| grep "Job has been submitted with JobID" | sed 's/.* //g')
wait_job_running $DATASTREAM_JOB
wait_oper_metric_num_in_records SemanticsCheckMapper.0 200
# take a savepoint of the state machine job
SAVEPOINT_PATH=$(stop_with_savepoint $DATASTREAM_JOB $TEST_DATA_DIR \
| grep "Savepoint completed. Path:" | sed 's/.* //g')
wait_job_terminal_state "${DATASTREAM_JOB}" "FINISHED"
# isolate the path without the scheme ("file:") and do the necessary checks
SAVEPOINT_DIR=${SAVEPOINT_PATH#"file:"}
if [ -z "$SAVEPOINT_DIR" ]; then
echo "Savepoint location was empty. This may mean that the stop-with-savepoint failed."
exit 1
elif [ ! -d "$SAVEPOINT_DIR" ]; then
echo "Savepoint $SAVEPOINT_PATH does not exist."
exit 1
fi
# Since it is not possible to differentiate reporter output between the first and second execution,
# we remember the number of metrics sampled in the first execution so that they can be ignored in the following monitorings
OLD_NUM_METRICS=$(get_num_metric_samples)
# resume state machine job with savepoint
DATASTREAM_JOB=$($FLINK_DIR/bin/flink run -s $SAVEPOINT_PATH -p $NEW_DOP -d $TEST_PROGRAM_JAR \
--test.semantics exactly-once \
--environment.parallelism $NEW_DOP \
--state_backend $STATE_BACKEND_TYPE \
--state_backend.checkpoint_directory $CHECKPOINT_DIR \
--state_backend.file.async $STATE_BACKEND_FILE_ASYNC \
--sequence_generator_source.sleep_time 15 \
--sequence_generator_source.sleep_after_elements 1 \
| grep "Job has been submitted with JobID" | sed 's/.* //g')
wait_job_running $DATASTREAM_JOB
wait_oper_metric_num_in_records SemanticsCheckMapper.0 200
# if state is erroneous and the state machine job produces alerting state transitions,
# output would be non-empty and the test will not pass
}
run_test_with_timeout 900 run_resume_savepoint_test
repo: kl0u/flink | path: flink-end-to-end-tests/test-scripts/test_resume_savepoint.sh | language: Shell | license: apache-2.0 | size: 5,256
#!/bin/bash
FN="h20kcod.db_3.4.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/annotation/src/contrib/h20kcod.db_3.4.0.tar.gz"
"https://bioarchive.galaxyproject.org/h20kcod.db_3.4.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-h20kcod.db/bioconductor-h20kcod.db_3.4.0_src_all.tar.gz"
)
MD5="3647facb272f58424f4c94ef92e8ee45"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
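# For example, STAGING might resolve to something like
# $PREFIX/share/bioconductor-h20kcod.db-3.4.0-1 (hypothetical build number).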
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
repo: cokelaer/bioconda-recipes | path: recipes/bioconductor-h20kcod.db/post-link.sh | language: Shell | license: mit | size: 1,298
#!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
npm run build
mkdir -p tmp
if [ ! -d "tmp/gh-pages" ]; then
git clone [email protected]:Hacker0x01/react-datepicker.git --branch gh-pages --single-branch tmp/gh-pages
fi
cd tmp/gh-pages
git pull
find . -maxdepth 1 ! -name '.git' ! -name '.' -exec rm -r {} \;
cp -r $DIR/{bundle.js,index.html,style.css,images} ./
git add --all
git commit -m "Publish new docs (automated commit)"
git push
|
sss0791/react-datepicker
|
docs/publish.sh
|
Shell
|
mit
| 474 |
#!/bin/sh
# Copyright (c) 1993, 1994, 1995, 1996 Rick Sladkey <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# $Id: syscallent.sh,v 1.3 2003/03/31 01:03:34 roland Exp $
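# Explanatory sketch (added; the input line is illustrative): the sed program
# below reduces header lines such as
#   #define __NR_read 3
# to "read 3" pairs; the awk program then prints one syscall table entry per
# call number, padding any gaps (and 100 trailing slots) with printargs stubs.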
cat ${1+"$@"} |
sed -n 's/^#[ ]*define[ ][ ]*SYS_\([^ ]*\)[ ]*[^0-9]*\([0-9]*\).*$/\1 \2/p
s/^#[ ]*define[ ][ ]*__NR_\([^ ]*\)[ ]*[^0-9]*\([0-9]*\).*$/\1 \2/p
s/^#[ ]*define[ ][ ]*__NR_\([^ ]*\)[ ]*[^0-9()]*(__NR_Linux + \([0-9]*\))$/\1 \2/p' |
sort -k2n | uniq |
awk '
BEGIN {
tabs = "\t\t\t\t\t\t\t\t"
call = -1;
}
{
while (++call < $2) {
f = "printargs"
n = "SYS_" call
s = "\t{ -1,\t0,\t"
s = s f ","
s = s substr(tabs, 1, 24/8 - int((length(f) + 1)/8))
s = s "\"" n "\""
s = s substr(tabs, 1, 16/8 - int((length(n) + 2)/8))
s = s "}, /* " call " */"
print s
}
f = "sys_" $1
n = $1
s = "\t{ -1,\t0,\t"
s = s f ","
s = s substr(tabs, 1, 24/8 - int((length(f) + 1)/8))
s = s "\"" n "\""
s = s substr(tabs, 1, 16/8 - int((length(n) + 2)/8))
s = s "}, /* " call " */"
print s
}
END {
limit = call + 100
while (++call < limit) {
f = "printargs"
n = "SYS_" call
s = "\t{ -1,\t0,\t"
s = s f ","
s = s substr(tabs, 1, 24/8 - int((length(f) + 1)/8))
s = s "\"" n "\""
s = s substr(tabs, 1, 16/8 - int((length(n) + 2)/8))
s = s "}, /* " call " */"
print s
}
}
'
|
ZHAW-INES/rioxo-uClinux-dist
|
user/strace/syscallent.sh
|
Shell
|
gpl-2.0
| 2,709 |
#!/bin/sh
: ==== start ====
TESTNAME=nat-pluto-06
source /testing/pluto/bin/northlocal.sh
echo done
|
y-trudeau/openswan-patch-meraki
|
testing/pluto/nat-pluto-06/northinit.sh
|
Shell
|
gpl-2.0
| 104 |
#!/usr/bin/env bash
#
# findMissingTranslations.sh
#
# Locate all language strings needing an update based on English
#
# Usage: findMissingTranslations.sh [language codes]
#
# If no language codes are specified then all languages will be checked
#
LANGHOME="Marlin/src/lcd/language"
[ -d $LANGHOME ] && cd $LANGHOME
FILES=$(ls language_*.h | grep -v -E "(_en|_test)\.h" | sed -E 's/language_([^\.]+)\.h/\1/')
declare -A STRING_MAP
# Get files matching the given arguments
TEST_LANGS=$FILES
if [[ $# -gt 0 ]]; then
TEST_LANGS=""
for K in "$@"; do
for F in $FILES; do
[[ "$F" != "${F%$K*}" ]] && TEST_LANGS="$TEST_LANGS $F"
done
done
fi
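# Illustrative invocation (added): "./findMissingTranslations.sh de fr"
# restricts the check to languages whose codes contain "de" or "fr"
# (e.g. language_de.h, language_fr.h).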
echo -n "Building list of missing strings..."
for i in $(awk '/#ifndef/{print $2}' language_en.h); do
[[ $i == "LANGUAGE_EN_H" || $i == "CHARSIZE" ]] && continue
LANG_LIST=""
for j in $TEST_LANGS; do
[[ $(grep -c " ${i} " language_${j}.h) -eq 0 ]] && LANG_LIST="$LANG_LIST $j"
done
[[ -z $LANG_LIST ]] && continue
STRING_MAP[$i]=$LANG_LIST
done
echo
for K in $( printf "%s\n" "${!STRING_MAP[@]}" | sort ); do
case "$#" in
1 ) echo $K ;;
* ) printf "%-35s :%s\n" "$K" "${STRING_MAP[$K]}" ;;
esac
done
|
simon-jouet/Marlin
|
buildroot/share/scripts/findMissingTranslations.sh
|
Shell
|
gpl-3.0
| 1,189 |
#!/bin/bash
set -e
set -o pipefail
name=$(basename $0 .sh)
result=$(mktemp -t ${name}.out.XXXXXX)
stderr=$(mktemp -t ${name}.out.XXXXXX)
ret=0
rm -f test_file
# First make sure that this OVAL fails, then scan again with --remediate
$OSCAP xccdf eval --results $result $srcdir/${name}.xccdf.xml 2> $stderr || ret=$?
[ $ret -eq 2 ]
echo "Stderr file = $stderr"
echo "Result file = $result"
[ -f $stderr ]; [ ! -s $stderr ]; :> $stderr
$OSCAP xccdf validate-xml $result
assert_exists 1 '//Value'
assert_exists 1 '//Value/title'
assert_exists 3 '//Value/value'
assert_exists 0 '//Value/value[text()="test_file"]'
assert_exists 1 '/Benchmark/Rule'
assert_exists 1 '/Benchmark/Rule/fix'
assert_exists 2 '/Benchmark/Rule/fix/sub'
assert_exists 1 '//rule-result'
assert_exists 1 '//rule-result/result'
assert_exists 1 '//rule-result/result[text()="fail"]'
assert_exists 0 '//rule-result/fix'
assert_exists 1 '//score'
assert_exists 1 '//score[text()="0.000000"]'
:> $result
$OSCAP xccdf eval --remediate --results $result $srcdir/${name}.xccdf.xml 2> $stderr
[ -f $stderr ]; [ ! -s $stderr ]; rm $stderr
$OSCAP xccdf validate-xml $result
assert_exists 1 '//Value'
assert_exists 1 '//Value/title'
assert_exists 3 '//Value/value'
assert_exists 0 '//Value/value[text()="test_file"]'
assert_exists 1 '/Benchmark/Rule'
assert_exists 1 '/Benchmark/Rule/fix'
assert_exists 2 '/Benchmark/Rule/fix/sub'
assert_exists 1 '//TestResult'
assert_exists 0 '//TestResult/profile'
assert_exists 1 '//rule-result'
assert_exists 1 '//rule-result/result'
assert_exists 1 '//rule-result/result[text()="fixed"]'
assert_exists 1 '//rule-result/fix'
assert_exists 1 '//rule-result/fix[@system="urn:xccdf:fix:script:sh"]'
assert_exists 1 '//rule-result/fix[contains(text(), "touch test_file")]'
assert_exists 1 '//rule-result/fix[contains(text(), "chmod a-x test_file")]'
assert_exists 0 '//rule-result/fix/sub'
assert_exists 1 '//rule-result/message'
assert_exists 1 '//rule-result/message[@severity="info"]'
assert_exists 1 '//rule-result/message[text()="Fix execution completed and returned: 0"]'
assert_exists 1 '//score'
assert_exists 1 '//score[text()="0.000000"]'
rm test_file
rm $result
|
openprivacy/openscap
|
tests/API/XCCDF/unittests/test_remediation_subs_value_title.sh
|
Shell
|
lgpl-2.1
| 2,204 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
export FORCE_ANSWER_TO_QUESTIONS=${FORCE_ANSWER_TO_QUESTIONS:="quit"}
export REMEMBER_LAST_ANSWER="true"
export PRINT_INFO_FROM_SCRIPTS="false"
export SKIP_CHECK_REMOTE_IMAGE="true"
if [[ $# -eq 0 ]]; then
params=("tests/bats/in_container/")
else
params=("${@}")
fi
# shellcheck source=scripts/ci/static_checks/in_container_bats_tests.sh
. "$( dirname "${BASH_SOURCE[0]}" )/../static_checks/in_container_bats_tests.sh" "${params[@]}"
|
airbnb/airflow
|
scripts/ci/pre_commit/pre_commit_in_container_bats_test.sh
|
Shell
|
apache-2.0
| 1,249 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$SCRIPTDIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPTDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
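# Illustrative (added comment; paths are hypothetical): if this script is run
# through a symlink such as ~/bin/clean -> .../ci/pipelines/clean_fork_pipelines.sh,
# the loop above follows the link so SCRIPTDIR resolves to the real
# ci/pipelines directory rather than the symlink's location.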
TARGET=geode
GEODE_FORK=${1}
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
GEODE_BRANCH=${2:-${CURRENT_BRANCH}}
. ${SCRIPTDIR}/shared/utilities.sh
SANITIZED_GEODE_BRANCH=$(getSanitizedBranch ${GEODE_BRANCH})
SANITIZED_GEODE_FORK=$(getSanitizedFork ${GEODE_FORK})
if [[ -z "${GEODE_FORK}" ]]; then
echo "No fork provided!"
exit 1
fi
if [[ "${GEODE_FORK}" == "apache" ]]; then
echo "This utility is not for primary pipelines."
exit 1
fi
echo "Fork is ${GEODE_FORK}"
echo "Branch is ${GEODE_BRANCH}"
echo "Deleting meta pipeline if it exists..."
META_PIPELINE="${SANITIZED_GEODE_FORK}-${SANITIZED_GEODE_BRANCH}-meta"
fly -t ${TARGET} destroy-pipeline --non-interactive -p ${META_PIPELINE}
echo "Deleting images pipeline if it exists..."
IMAGES_PIPELINE="${SANITIZED_GEODE_FORK}-${SANITIZED_GEODE_BRANCH}-images"
fly -t ${TARGET} destroy-pipeline --non-interactive -p ${IMAGES_PIPELINE}
echo "Deleting reaper pipeline if it exists..."
REAPER_PIPELINE="${SANITIZED_GEODE_FORK}-${SANITIZED_GEODE_BRANCH}-reaper"
fly -t ${TARGET} destroy-pipeline --non-interactive -p ${REAPER_PIPELINE}
echo "Deleting build pipeline if it exists..."
BUILD_PIPELINE="${SANITIZED_GEODE_FORK}-${SANITIZED_GEODE_BRANCH}-main"
fly -t ${TARGET} destroy-pipeline --non-interactive -p ${BUILD_PIPELINE}
gcloud container images list | grep "${SANITIZED_GEODE_FORK}-${SANITIZED_GEODE_BRANCH}" | while IFS= read -r line; do
echo "Deleting image: ${line}"
gcloud container images delete ${line}:latest --quiet
gcloud container images list-tags ${line} --filter='-tags:*' --format='get(digest)' | while IFS= read -r line2; do
echo "Deleting image: ${line2}"
gcloud container images delete ${line}@${line2} --quiet
done
done
gcloud compute images list | awk "/^${SANITIZED_GEODE_FORK}-${SANITIZED_GEODE_BRANCH}/ {print \$1}" | while IFS= read -r line; do
echo "Deleting image: ${line}"
gcloud compute images delete ${line} --quiet
done
|
smgoller/geode
|
ci/pipelines/clean_fork_pipelines.sh
|
Shell
|
apache-2.0
| 3,170 |
#!/bin/bash
#
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Search the current directory and its subdirectories for PNG and SVG images,
# and output an HTML page containing the images.
#
# Usage: ./generate_image_html.sh
#
source=$(pwd | sed -e "s|^.*chrome/src/||")
cat <<HTMLBEGIN
<!doctype html>
<html>
<head>
<style>
body {
font-family: "Roboto", monospace;
font-size: 16pt;
}
a[href] {
text-decoration: none;
}
img {
margin: 10px;
background: #eee;
object-fit: contain;
width: 100px;
height: 100px;
}
p {
text-align: top;
}
</style>
</head>
<body>
<h3>
image source: <a href="https://cs.chromium.org?q=${source}">${source}</a>
</h3>
HTMLBEGIN
output_html_image_element() {
echo "<img src='${1}' title='${1}' class='${2}'>"
}
find_directories_containing_images() {
find . | grep -e "\.svg$" -e "\.png$" | while read image; do
echo $(dirname ${image})
done | sort | uniq
}
for directory in $(find_directories_containing_images); do
# generate HTML for the directory PNG images.
echo "<h4>sub-directory ${directory} PNG images</h4><p>"
ls ${directory} | grep -e "\.png$" | while read image; do
output_html_image_element "${directory}/${image}" PNG
done ; echo "</p>"
# There are no SVG images in the "2x" sub-directories.
if grep -q "/2x" <<< "${directory}"; then
continue
fi
# generate HTML for the directory SVG images.
echo "<h4>sub-directory ${directory} SVG images</h4><p>"
ls ${directory} | grep -e "\.svg$" | while read image; do
output_html_image_element "${directory}/${image}" SVG
done ; echo "</p>"
done
cat <<HTMLEND
</body>
</html>
HTMLEND
|
nwjs/chromium.src
|
ui/file_manager/file_manager/foreground/images/generate_image_html.sh
|
Shell
|
bsd-3-clause
| 1,789 |
#!/bin/sh
#
# Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 4627316 6743526
# @summary Test option to limit direct memory allocation
#
# @requires (os.arch == "x86_64") | (os.arch == "amd64") | (os.arch == "sparcv9")
# @build LimitDirectMemory
# @run shell LimitDirectMemory.sh
TMP1=tmp_$$
runTest() {
echo "Testing: $*"
${TESTJAVA}/bin/java ${TESTVMOPTS} $*
if [ $? -eq 0 ]
then echo "--- passed as expected"
else
echo "--- failed"
exit 1
fi
}
launchFail() {
echo "Testing: -XX:MaxDirectMemorySize=$* -cp ${TESTCLASSES} \
LimitDirectMemory true DEFAULT DEFAULT+1M"
${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:MaxDirectMemorySize=$* -cp ${TESTCLASSES} \
LimitDirectMemory true DEFAULT DEFAULT+1M > ${TMP1} 2>&1
cat ${TMP1}
cat ${TMP1} | grep -s "Unrecognized VM option: \'MaxDirectMemorySize="
if [ $? -ne 0 ]
then echo "--- failed as expected"
else
echo "--- failed"
exit 1
fi
}
# $java LimitDirectMemory throwp fill_direct_memory size_per_buffer
# Memory is properly limited using multiple buffers.
runTest -XX:MaxDirectMemorySize=10 -cp ${TESTCLASSES} LimitDirectMemory true 10 1
runTest -XX:MaxDirectMemorySize=1k -cp ${TESTCLASSES} LimitDirectMemory true 1k 100
runTest -XX:MaxDirectMemorySize=10m -cp ${TESTCLASSES} LimitDirectMemory true 10m 10m
# We can increase the amount of available memory.
runTest -XX:MaxDirectMemorySize=65M -cp ${TESTCLASSES} \
LimitDirectMemory false 64M 65M
# Exactly the default amount of memory is available.
runTest -cp ${TESTCLASSES} LimitDirectMemory false 10 1
runTest -Xmx64m -cp ${TESTCLASSES} LimitDirectMemory false 0 DEFAULT
runTest -Xmx64m -cp ${TESTCLASSES} LimitDirectMemory true 0 DEFAULT+1
# We should be able to eliminate direct memory allocation entirely.
runTest -XX:MaxDirectMemorySize=0 -cp ${TESTCLASSES} LimitDirectMemory true 0 1
# Setting the system property should not work so we should be able to allocate
# the default amount.
runTest -Dsun.nio.MaxDirectMemorySize=1K -Xmx64m -cp ${TESTCLASSES} \
LimitDirectMemory false DEFAULT-1 DEFAULT/2
# Various bad values fail to launch the VM.
launchFail foo
launchFail 10kmt
launchFail -1
# Clean-up
rm ${TMP1}
|
FauxFaux/jdk9-jdk
|
test/java/nio/Buffer/LimitDirectMemory.sh
|
Shell
|
gpl-2.0
| 3,183 |
#!/bin/bash
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
# Some common method for release scripts
# A release candidate is created from a branch named "release-%name%"
# where %name% is the name of the release. Once promoted to a release,
# A tag %name% will be created from this branch and the corresponding
# branch removed.
# The last commit of the release branch is always a commit containing
# the release notes in the commit message and updating the CHANGELOG.md.
# This last commit will be cherry-picked back in the master branch
# when the release candidate is promoted to a release.
# To follow tracks and to support how CI systems fetch the refs, we
# store two commit notes: the release name and the candidate number.
# Get the short hash of a commit
function __git_commit_hash() {
git rev-parse "${1}"
}
# Get the subject (first line of the commit message) of a commit
function __git_commit_subject() {
git show -s --pretty=format:%s "$@"
}
# Returns the branch name of the current git repository
function git_get_branch() {
git symbolic-ref --short HEAD
}
# Returns the tag name of the current git repository
function git_get_tag() {
git describe --tag
}
# Show the commit message of the ref specified in argument
function git_commit_msg() {
git show -s --pretty=format:%B "$@"
}
# Extract the release candidate number from the git branch name
function get_release_candidate() {
# Match rcX and return X
git_get_branch 2>/dev/null | grep -Po "(?<=rc)([0-9]|\.)*$" || true
}
# Extract the release name from the git branch name
function get_release_name() {
# Match branch name release-X.X.X-rcY and return X.X.X
# or match tag name X.X.X and return X.X.X
git_get_branch 2>/dev/null | grep -Po "(?<=release-)([0-9]|\.)*(?=rc)" || git_get_tag | grep -Po "^([0-9]|\.)*$" || true
}
# Get the list of commit hashes between two revisions
function git_log_hash() {
local baseline="$1"
local head="$2"
shift 2
git log --pretty=format:%H "${baseline}".."${head}" "$@"
}
# Extract the full release name from the branch name or tag name
function get_full_release_name() {
local name="$(get_release_name "$@")"
local rc="$(get_release_candidate "$@")"
if [ -n "${rc}" ]; then
echo "${name}rc${rc}"
else
echo "${name}"
fi
}
# Returns the info from the branch of the release. It is the current branch
# but it errors out if the current branch is not a release branch. This
# method returns the tag of the release and the number of the current
# candidate in this release.
function get_release_branch() {
local branch_name=$(git_get_branch)
if [ -z "$(get_release_name)" ] || [ -z "$(get_release_candidate)" ]; then
echo "Not a release branch: ${branch_name}." >&2
exit 1
fi
echo "${branch_name}"
}
# fmt behaves differently on *BSD and on GNU/Linux, use fold.
function wrap_text() {
fold -s -w $1 | sed 's/ *$//'
}
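# Usage sketch (added; the branch name is illustrative): on a branch named
# "release-0.28.1rc5" the helpers above compose as follows:
#   get_release_name        # -> 0.28.1
#   get_release_candidate   # -> 5
#   get_full_release_name   # -> 0.28.1rc5
#   get_release_branch      # -> release-0.28.1rc5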
|
twitter-forks/bazel
|
scripts/release/common.sh
|
Shell
|
apache-2.0
| 3,458 |
#!/bin/bash
#
# vim: set ts=4 sw=4 et:
#
# Passed arguments:
# $1 - pkgname [REQUIRED]
# $2 - cross target [OPTIONAL]
if [ $# -lt 1 -o $# -gt 2 ]; then
echo "${0##*/}: invalid number of arguments: pkgname [cross-target]"
exit 1
fi
PKGNAME="$1"
XBPS_CROSS_BUILD="$2"
for f in $XBPS_SHUTILSDIR/*.sh; do
. $f
done
setup_pkg "$PKGNAME" $XBPS_CROSS_BUILD
for f in $XBPS_COMMONDIR/environment/fetch/*.sh; do
source_file "$f"
done
XBPS_FETCH_DONE="${XBPS_STATEDIR}/${sourcepkg}_${XBPS_CROSS_BUILD}_fetch_done"
if [ -f "$XBPS_FETCH_DONE" ]; then
exit 0
fi
# Run pre-fetch hooks.
run_pkg_hooks pre-fetch
# If template defines pre_fetch(), use it.
if declare -f pre_fetch >/dev/null; then
run_func pre_fetch
fi
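# Illustrative only (added; the body is hypothetical): a package template can
# hook into this step by defining, e.g.
#   pre_fetch() { echo "fetching sources for ${pkgname}"; }
# which run_func executes at this point.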
# If template defines do_fetch(), use it rather than the hooks.
if declare -f do_fetch >/dev/null; then
cd ${XBPS_BUILDDIR}
[ -n "$build_wrksrc" ] && mkdir -p "$wrksrc"
run_func do_fetch
else
# Run do-fetch hooks.
run_pkg_hooks "do-fetch"
fi
cd ${XBPS_BUILDDIR} || msg_error "$pkgver: cannot access wrksrc directory [$wrksrc]\n"
# If the template defines post_fetch(), use it.
if declare -f post_fetch >/dev/null; then
run_func post_fetch
fi
# Run post-fetch hooks.
run_pkg_hooks post-fetch
touch -f $XBPS_FETCH_DONE
exit 0
|
oliver-cfc/void-packages
|
common/xbps-src/libexec/xbps-src-dofetch.sh
|
Shell
|
bsd-2-clause
| 1,285 |
# The below functions have been pulled from the following Stack Overflow
# answer: http://stackoverflow.com/a/5196220/3898812 from the user John Kugleman
# and they are all kinds of awesome.
. /etc/init.d/functions
# Use step(), try(), and next() to perform a series of commands and print
# [ OK ] or [FAILED] at the end. The step as a whole fails if any individual
# command fails.
#
# Example:
# step "Remounting / and /boot as read-write:"
# try mount -o remount,rw /
# try mount -o remount,rw /boot
# next
step() {
echo -n "$@"
STEP_OK=0
[[ -w /tmp ]] && echo $STEP_OK > /tmp/step.$$
}
try() {
# Check for `-b' argument to run command in the background.
local BG=
[[ $1 == -b ]] && { BG=1; shift; }
[[ $1 == -- ]] && { shift; }
# Run the command.
if [[ -z $BG ]]; then
"$@"
else
"$@" &
fi
# Check if command failed and update $STEP_OK if so.
local EXIT_CODE=$?
if [[ $EXIT_CODE -ne 0 ]]; then
STEP_OK=$EXIT_CODE
[[ -w /tmp ]] && echo $STEP_OK > /tmp/step.$$
if [[ -n $LOG_STEPS ]]; then
local FILE=$(readlink -m "${BASH_SOURCE[1]}")
local LINE=${BASH_LINENO[0]}
echo "$FILE: line $LINE: Command \`$*' failed with exit code $EXIT_CODE." >> "$LOG_STEPS"
fi
fi
return $EXIT_CODE
}
next() {
[[ -f /tmp/step.$$ ]] && { STEP_OK=$(< /tmp/step.$$); rm -f /tmp/step.$$; }
[[ $STEP_OK -eq 0 ]] && echo_success || echo_failure
echo
return $STEP_OK
}
|
philippreston/OpenMAMA
|
release_scripts/helper-functions.sh
|
Shell
|
lgpl-2.1
| 1,545 |
#!/bin/bash -x
connect_server_list_jobs(){
exec sqoop2 << EOF
set server --host localhost --port 12000
show server --all
show job --all
exit
EOF
}
check_sqoop2(){
res=`connect_server_list_jobs`
if [ `echo $res | grep "localhost" | wc -l` -lt 1 ]; then
echo "sqoop2 is not available"
exit 1
else
if [ `echo $res | grep "job(s) to show" | wc -l` -lt 1 ]; then
echo "sqoop2 is not available"
exit 1
else
echo "sqoop2 is available"
exit 0
fi
fi
}
check_sqoop2
|
shakamunyi/sahara
|
sahara/tests/integration/tests/resources/sqoop2_service_test.sh
|
Shell
|
apache-2.0
| 567 |
export a=1
export a
export a=1 b=2
|
siosio/intellij-community
|
plugins/sh/testData/oldParser/exportParse.sh
|
Shell
|
apache-2.0
| 35 |
#!/bin/sh
uudecode espudp1.puu
echo -n test espudp1...
../tcpdump -t -n -E "file esp-secrets.txt" -r espudp1.pcap >espudp1.new
if diff espudp1.new espudp1.out
then
echo passed.
else
echo failed.
fi
|
rhuitl/uClinux
|
user/tcpdump/tests/espudp1.sh
|
Shell
|
gpl-2.0
| 203 |
#!/bin/sh
# find root
cd `dirname $PWD/$0` ; cd ..
#TODO: add support for ccache
# XXX. fails with >1
[ -z "${MAKE_JOBS}" ] && MAKE_JOBS=8
OLD_LDFLAGS="${LDFLAGS}"
unset LDFLAGS
export CC="emcc -Os -s WASM=1 -s SIDE_MODULE=1"
export AR="emar"
CFGFLAGS="./configure --prefix=/usr --disable-debugger --with-compiler=wasm --without-pic --with-nonpic"
make mrproper
cp -f plugins.emscripten.cfg plugins.cfg
./configure-plugins
./configure ${CFGFLAGS} --host=wasm && \
make -s -j ${MAKE_JOBS} DEBUG=0
|
jduck/radare2
|
sys/wasm.sh
|
Shell
|
lgpl-3.0
| 503 |
R CMD REMOVE --library=$PREFIX/lib/R/library/ mogene10stv1probe
|
joachimwolff/bioconda-recipes
|
recipes/bioconductor-mogene10stv1probe/pre-unlink.sh
|
Shell
|
mit
| 64 |
#!/bin/bash
# Starts an Ubuntu 14.04 Docker container and runs the Yarn end-to-end test on it
set -ex
./data/start-ubuntu.sh ubuntu:14.04
|
kasperlewau/yarn
|
end_to_end_tests/test-ubuntu-14.04.sh
|
Shell
|
bsd-2-clause
| 138 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Cleans & runs the integration tests in the test container (mesosphere/kubernetes-mesos-test).
#
# Prerequisite:
# ./cluster/mesos/docker/test/build.sh
#
# Example Usage:
# ./contrib/mesos/ci/test-integration.sh
set -o errexit
set -o nounset
set -o pipefail
set -o errtrace
TEST_ARGS="$@"
KUBE_ROOT=$(cd "$(dirname "${BASH_SOURCE}")/../../.." && pwd)
"${KUBE_ROOT}/contrib/mesos/ci/run.sh" make clean test-integration ${TEST_ARGS}
|
aclisp/kubernetes
|
contrib/mesos/ci/test-integration.sh
|
Shell
|
apache-2.0
| 1,037 |
rm -f $MYSQLTEST_VARDIR/master-data/test/rpl_misc_functions.outfile
|
HiSPARC/station-software
|
user/mysql/mysql-test/suite/rpl/t/rpl_misc_functions-slave.sh
|
Shell
|
gpl-3.0
| 68 |
#!/bin/sh
set -e # Exit on error
echo 'Provisioning Environment'
if which smtp-sink > /dev/null; then
echo ' smtp-sink is already installed'
else
echo ' Updating apt-get repositories'
sudo apt-get -y -qq update > /dev/null
echo ' Installing postfix'
sudo DEBIAN_FRONTEND=noninteractive apt-get -y -qq install postfix > /dev/null 2>&1
echo ' Stopping postfix'
sudo /etc/init.d/postfix stop > /dev/null
fi
killall -9 smtp-sink > /dev/null 2>&1 || true
HOSTNAME=`hostname -f`
echo ' Starting smtp-sink on '$IP':'$SMTP_PORT' with hostname '$HOSTNAME
smtp-sink -h $HOSTNAME $IP:$SMTP_PORT 5000 &
echo 'Environment has been provisioned.'
|
schutm/wakala
|
environments/resources/provision.sh
|
Shell
|
isc
| 681 |
#!/usr/bin/env bash
# Small script to display the reverse proxy config for a service via a curl against Consul
#zf170515.0921
curl -X GET http://10.92.103.53:8500/v1/catalog/services | jq . | grep "Host:" | grep -i "$1"
|
zuzu59/deploy-proxmox
|
showsdfrp.sh
|
Shell
|
mit
| 224 |
#!/bin/bash
#
# Copyright (c) 2009-2014 finfra.com <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
x=`rpm -qa|grep sshpass`
if [ ${#x} -eq 0 ] ; then
wget http://pkgs.repoforge.org/sshpass/sshpass-1.05-1.el3.rf.x86_64.rpm
rpm -Uvh sshpass-1.05-1.el3.rf.x86_64.rpm
rm -f sshpass-1.05-1.el3.rf.x86_64.rpm
fi
if [ -d ~/.ssh/ ]
then
echo '.ssh exist'
else
mkdir ~/.ssh/
chmod 700 ~/.ssh/
fi
if [ 'root' = `whoami` ]; then
echo '#################################'
echo '#################################'
echo '#################################'
echo '#################################'
echo '#################################'
#do extra job.
cat ~/_setting/password>~/.ssh/pass
else
#do extra job.
cat ~/_setting/password>~/.ssh/pass
fi
|
WTFace/hadoop2_cluster_install_for_centos
|
installSshpass.sh
|
Shell
|
mit
| 1,777 |
#!/usr/bin/env bash
sudo yum -y groupinstall 'Development Tools'
sudo yum -y install gmp gmp-devel db4 db4-devel ncurses ncurses-devel
SCRIPT=$(readlink -f "$0")
SCRIPTPATH=$(dirname "$SCRIPT")
if [ "$SCRIPTPATH" = "/tmp" ] ; then
SCRIPTPATH=/vagrant
fi
mkdir -p $HOME/rpmbuild/{BUILD,RPMS,SOURCES,SRPMS}
ln -sf $SCRIPTPATH/SPECS $HOME/rpmbuild/SPECS
echo '%_topdir '$HOME'/rpmbuild' > $HOME/.rpmmacros
# Get GnuCOBOL source
wget http://downloads.sourceforge.net/project/open-cobol/gnu-cobol/1.1/gnu-cobol-1.1.tar.gz -P $HOME/rpmbuild/SOURCES/ -q
|
cfpb/gnucobol-el6-rpm
|
bootstrap.sh
|
Shell
|
mit
| 553 |
#!/usr/bin/env bash
# Copyright (c) 2016 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
### This script attempts to download the signature file SHA256SUMS.asc from
### digibytecore.org and digibyte.org and compares them.
### It first checks if the signature passes, and then downloads the files specified in
### the file, and checks if the hashes of these files match those that are specified
### in the signature file.
### The script returns 0 if everything passes the checks. It returns 1 if either the
### signature check or the hash check doesn't pass. If an error occurs the return value is 2
export LC_ALL=C
function clean_up {
for file in $*
do
rm "$file" 2> /dev/null
done
}
WORKINGDIR="/tmp/digibyte_verify_binaries"
TMPFILE="hashes.tmp"
SIGNATUREFILENAME="SHA256SUMS.asc"
RCSUBDIR="test"
HOST1="https://digibytecore.org"
HOST2="https://digibyte.io"
BASEDIR="/bin/"
VERSIONPREFIX="digibyte-core-"
RCVERSIONSTRING="rc"
if [ ! -d "$WORKINGDIR" ]; then
mkdir "$WORKINGDIR"
fi
cd "$WORKINGDIR" || exit 1
#test if a version number has been passed as an argument
if [ -n "$1" ]; then
#let's also check if the version number includes the prefix 'digibyte-',
# and add this prefix if it doesn't
if [[ $1 == "$VERSIONPREFIX"* ]]; then
VERSION="$1"
else
VERSION="$VERSIONPREFIX$1"
fi
STRIPPEDLAST="${VERSION%-*}"
#now let's see if the version string contains "rc" or a platform name (e.g. "osx")
if [[ "$STRIPPEDLAST-" == "$VERSIONPREFIX" ]]; then
BASEDIR="$BASEDIR$VERSION/"
else
# let's examine the last part to see if it's rc and/or platform name
STRIPPEDNEXTTOLAST="${STRIPPEDLAST%-*}"
if [[ "$STRIPPEDNEXTTOLAST-" == "$VERSIONPREFIX" ]]; then
LASTSUFFIX="${VERSION##*-}"
VERSION="$STRIPPEDLAST"
if [[ $LASTSUFFIX == *"$RCVERSIONSTRING"* ]]; then
RCVERSION="$LASTSUFFIX"
else
PLATFORM="$LASTSUFFIX"
fi
else
RCVERSION="${STRIPPEDLAST##*-}"
PLATFORM="${VERSION##*-}"
VERSION="$STRIPPEDNEXTTOLAST"
fi
BASEDIR="$BASEDIR$VERSION/"
if [[ $RCVERSION == *"$RCVERSIONSTRING"* ]]; then
BASEDIR="$BASEDIR$RCSUBDIR.$RCVERSION/"
fi
fi
else
echo "Error: need to specify a version on the command line"
exit 2
fi
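# Worked example (added; values are illustrative): for "$1" = "0.10.4-rc1-osx"
# the parsing above yields VERSION="digibyte-core-0.10.4", RCVERSION="rc1",
# PLATFORM="osx", and BASEDIR="/bin/digibyte-core-0.10.4/test.rc1/".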
#first we fetch the file containing the signature
WGETOUT=$(wget -N "$HOST1$BASEDIR$SIGNATUREFILENAME" 2>&1)
#and then see if wget completed successfully
if [ $? -ne 0 ]; then
echo "Error: couldn't fetch signature file. Have you specified the version number in the following format?"
echo "[$VERSIONPREFIX]<version>-[$RCVERSIONSTRING[0-9]] (example: ${VERSIONPREFIX}0.10.4-${RCVERSIONSTRING}1)"
echo "wget output:"
echo "$WGETOUT"|sed 's/^/\t/g'
exit 2
fi
WGETOUT=$(wget -N -O "$SIGNATUREFILENAME.2" "$HOST2$BASEDIR$SIGNATUREFILENAME" 2>&1)
if [ $? -ne 0 ]; then
echo "digibyte.org failed to provide signature file, but digibytecore.org did?"
echo "wget output:"
echo "$WGETOUT"|sed 's/^/\t/g'
clean_up $SIGNATUREFILENAME
exit 3
fi
SIGFILEDIFFS="$(diff $SIGNATUREFILENAME $SIGNATUREFILENAME.2)"
if [ "$SIGFILEDIFFS" != "" ]; then
echo "digibyte.org and digibytecore.org signature files were not equal?"
clean_up $SIGNATUREFILENAME $SIGNATUREFILENAME.2
exit 4
fi
#then we check it
GPGOUT=$(gpg --yes --decrypt --output "$TMPFILE" "$SIGNATUREFILENAME" 2>&1)
#return value 0: good signature
#return value 1: bad signature
#return value 2: gpg error
RET="$?"
if [ $RET -ne 0 ]; then
if [ $RET -eq 1 ]; then
#and notify the user if it's bad
echo "Bad signature."
elif [ $RET -eq 2 ]; then
#or if a gpg error has occurred
echo "gpg error. Do you have the DigiByte Core binary release signing key installed?"
fi
echo "gpg output:"
echo "$GPGOUT"|sed 's/^/\t/g'
clean_up $SIGNATUREFILENAME $SIGNATUREFILENAME.2 $TMPFILE
exit "$RET"
fi
if [ -n "$PLATFORM" ]; then
grep $PLATFORM $TMPFILE > "$TMPFILE-plat"
TMPFILESIZE=$(stat -c%s "$TMPFILE-plat")
if [ $TMPFILESIZE -eq 0 ]; then
echo "error: no files matched the platform specified" && exit 3
fi
mv "$TMPFILE-plat" $TMPFILE
fi
#here we extract the filenames from the signature file
FILES=$(awk '{print $2}' "$TMPFILE")
#and download these one by one
for file in $FILES
do
echo "Downloading $file"
wget --quiet -N "$HOST1$BASEDIR$file"
done
#check hashes
DIFF=$(diff <(sha256sum $FILES) "$TMPFILE")
DIFFRET=$?
if [ $DIFFRET -eq 1 ]; then
echo "Hashes don't match."
echo "Offending files:"
echo "$DIFF"|grep "^<"|awk '{print "\t"$3}'
exit 1
elif [ $DIFFRET -gt 1 ]; then
echo "Error executing 'diff'"
exit 2
fi
if [ -n "$2" ]; then
echo "Clean up the binaries"
clean_up $FILES $SIGNATUREFILENAME $SIGNATUREFILENAME.2 $TMPFILE
else
echo "Keep the binaries in $WORKINGDIR"
clean_up $TMPFILE
fi
echo -e "Verified hashes of \n$FILES"
exit 0
|
digibyte/digibyte
|
contrib/verifybinaries/verify.sh
|
Shell
|
mit
| 5,083 |
#!/bin/bash
# download selected debian-choot from docker image repository
# may not work sometimes (after major changes in docker's debian repo structure)
# In order to setup debian chroot with sandboxer, you need to do the following:
# 0. make sure you are running these steps as a regular unprivileged user (that's the point of using sandboxer).
# 1. make sure that sandboxer-fakeroot package is installed, if building from source - it will be built and installed with sandboxer suite
# 2. you may want to deploy chroot and sandboxer config files in a separate directory, just make sure that following files (or symlinks to it) are there:
# - download-debian-chroot.sh - this script, it will download and extract a minimal debian distro image to the "debian_chroot" subdirectory
# - download-image-from-docker-repo.sh - helper script, used to download minimal debian chroot image from docker repository
# - debian-minimal-setup.sh - helper script, may be removed after chroot deploy
# - debian-setup.cfg.lua - sandboxer config file that may be used to alter debian chroot: run apt-get, install new packages, update configuration, etc. NOT FOR REGULAR USE
# - debian-version-probe.lua.in - helper script for debian/ubuntu-based setups, do not remove
# - debian-sandbox.cfg.lua - sandboxer config file for running regular applications, chroot-subdirectories will be mounted read-only as if running a regular linux session with an unprivileged user
# 3. run "download-debian-chroot.sh" in order to download a supported debian image (run without args in order to see usage info).
# 4. run "sandboxer debian-setup.cfg.lua fakeroot_shell" to start sandboxer-session with fakeroot emulating running this sandbox as root.
# 5. configure your new debian sandbox - install new application with apt, modify config files, etc...
# 5.a as an alternative you may run "/root/debian-minimal-setup.sh" while running sandboxer's fakeroot shell to perform automatic setup of a minimal sandbox with X11 suitable for desktop use
# 6. when done - just type "exit"; if there are no active sessions running this chroot for a while, sandboxer will automatically terminate its session manager running for this chroot and perform some cleanup.
# 6.a you may force-terminate all processes and the session manager for this sandbox by executing "sandboxer-term debian-setup.cfg.lua" (from the host system)
# 7. run "sandboxer debian-sandbox.cfg.lua shell" to start the sandbox in unprivileged-user mode; you may run your own stuff by using this config file, see examples for more info
# NOTE: you may need to run the "sandboxer-download-extra" script in order to download prebuilt binary components for use with older debian chroots - this is optional; do not run it if everything works well. Downloaded components will be placed at ~/.cache/sandboxer; you may remove them if not needed. Prebuilt binaries are updated infrequently and may be outdated or not work as intended, but they may help to run an ancient debian chroot on a newer host system.
script_dir="$( cd "$( dirname "$0" )" && pwd )"
show_usage() {
echo "usage: download-debian-chroot.sh <distro's major version number or codename> [arch. only i386 and amd64 (default) supported now]"
exit 1
}
set -e
name="$1"
[[ -z $name ]] && show_usage
name=`echo "$name" | tr '[:upper:]' '[:lower:]'`
case "$name" in
"8"|"jessie")
name="jessie"
;;
"9"|"stretch")
name="stretch"
;;
"10"|"buster")
name="buster"
;;
"11"|"bullseye")
name="bullseye"
;;
"sid")
name="sid"
;;
*)
echo "selected debian distro name or version currently is not supported"
show_usage
;;
esac
arch="$2"
[[ -z $arch ]] && arch="amd64"
[[ $arch != amd64 && $arch != i386 ]] && \
echo "selected arch $arch is not supported for now and may not work with sandboxer!" && \
exit 1
echo "downloading debian $name with $arch arch from docker repository"
"$script_dir/download-image-from-docker-repo.sh" debian "$name" "$arch"
# remove apt configs needed only for docker (see https://github.com/docker/docker/blob/master/contrib/mkimage/debootstrap)
rm "$script_dir/debian_chroot/etc/apt/apt.conf.d/docker-"*
# deploy minimal setup script
cp "$script_dir/debian-minimal-setup.sh" "$script_dir/debian_chroot/root/debian-minimal-setup.sh"
# create exclude rules for dpkg if missing
if [[ ! -f "$script_dir/debian_chroot/etc/dpkg/dpkg.cfg.d/excludes" ]]; then
echo "creating rule for dpkg to exclude manuals and docs when installing packages"
echo "remove debian_chroot/etc/dpkg/dpkg.cfg.d/excludes file manually if you need them"
echo "path-exclude=/usr/share/man/*" > "$script_dir/debian_chroot/etc/dpkg/dpkg.cfg.d/excludes"
echo "path-exclude=/usr/share/doc/*" >> "$script_dir/debian_chroot/etc/dpkg/dpkg.cfg.d/excludes"
echo "path-include=/usr/share/doc/*/copyright" >> "$script_dir/debian_chroot/etc/dpkg/dpkg.cfg.d/excludes"
echo "path-include=/usr/share/doc/*/changelog.Debian.*" >> "$script_dir/debian_chroot/etc/dpkg/dpkg.cfg.d/excludes"
fi
if [[ $name = sid || $name = buster || $name = bullseye ]]; then
# modify config for apt, to make it work under fakeroot
echo "modifying apt config options to make it work with sandboxer/fakeoot restrictions"
echo "APT::Sandbox::Seccomp::Allow { \"socket\" };" > "$script_dir/debian_chroot/etc/apt/apt.conf.d/99-sandboxer"
echo "APT::Sandbox::Seccomp::Allow { \"connect\" };" >> "$script_dir/debian_chroot/etc/apt/apt.conf.d/99-sandboxer"
fi
|
DarkCaster/Sandboxer
|
Examples/download-debian-chroot.sh
|
Shell
|
mit
| 5,441 |
cd compiler; cargo build; cd ..
cd assembler; cargo build; cd ..
cd simulator; cargo build; cd ..
./compiler/target/debug/kcc tests/test.c > test.asm
./assembler/target/debug/kasm test.asm > test.bin
./simulator/target/debug/ksim test.bin > test.txt
diff test.txt tests/expect.txt
|
kivantium/kivantium
|
test.sh
|
Shell
|
mit
| 282 |
#!/bin/bash
# The iTerm2 customizations fall under the following license:
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# -- BEGIN ITERM2 CUSTOMIZATIONS --
if [[ "$ITERM_ENABLE_SHELL_INTEGRATION_WITH_TMUX""$TERM" != screen && "$ITERM_SHELL_INTEGRATION_INSTALLED" = "" && "$-" == *i* && "$TERM" != linux && "$TERM" != dumb ]]; then
if shopt extdebug | grep on > /dev/null; then
echo "iTerm2 Shell Integration not installed."
echo ""
echo "Your shell has 'extdebug' turned on."
echo "This is incompatible with shell integration."
echo "Find 'shopt -s extdebug' in bash's rc scripts and remove it."
return 0
fi
ITERM_SHELL_INTEGRATION_INSTALLED=Yes
# Saved copy of your PS1. This is used to detect if the user changes PS1
# directly. ITERM_PREV_PS1 will hold the last value that this script set PS1 to
# (including various custom escape sequences).
ITERM_PREV_PS1="$PS1"
# A note on execution. When you invoke a command at an interactive prompt the following steps are taken:
#
# 1. The DEBUG trap runs.
# It calls __bp_preexec_invoke_exec
# It runs any registered preexec_functions, including __iterm2_preexec
# 2. The command you executed runs.
# 3. PROMPT_COMMAND runs.
# It runs __bp_precmd_invoke_cmd, which is inserted as the first command in PROMPT_COMMAND.
# It calls any registered precmd_functions
# Then, pre-existing PROMPT_COMMANDs run
# 4. The prompt is shown.
#
# __iterm2_prompt_command used to be run from precmd_functions but then a pre-existing
# PROMPT_COMMAND could clobber the PS1 it modifies. Instead, add __iterm2_prompt_command as the last
# of the "preexisting" PROMPT_COMMANDs so it will be the very last thing done before the prompt is
# shown (unless someone amends PROMPT_COMMAND, but that is on them).
if [[ -n "$PROMPT_COMMAND" ]]; then
PROMPT_COMMAND+=$'\n'
fi;
PROMPT_COMMAND+='__iterm2_prompt_command'
# The following chunk of code, bash-preexec.sh, is licensed like this:
# The MIT License
#
# Copyright (c) 2015 Ryan Caloras and contributors (see https://github.com/rcaloras/bash-preexec)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Wrap bash-preexec.sh in a function so that, if it exits early due to having
# been sourced elsewhere, it doesn't exit our entire script.
_install_bash_preexec () {
# -- END ITERM2 CUSTOMIZATIONS --
# -- BEGIN BASH-PREEXEC.SH --
# bash-preexec.sh -- Bash support for ZSH-like 'preexec' and 'precmd' functions.
# https://github.com/rcaloras/bash-preexec
#
#
# 'preexec' functions are executed before each interactive command is
# executed, with the interactive command as its argument. The 'precmd'
# function is executed before each prompt is displayed.
#
# Author: Ryan Caloras ([email protected])
# Forked from Original Author: Glyph Lefkowitz
#
# V0.4.0
#
# General Usage:
#
# 1. Source this file at the end of your bash profile so as not to interfere
# with anything else that's using PROMPT_COMMAND.
#
# 2. Add any precmd or preexec functions by appending them to their arrays:
# e.g.
# precmd_functions+=(my_precmd_function)
# precmd_functions+=(some_other_precmd_function)
#
# preexec_functions+=(my_preexec_function)
#
# 3. Consider changing anything using the DEBUG trap or PROMPT_COMMAND
# to use preexec and precmd instead. Preexisting usages will be
# preserved, but doing so manually may be less surprising.
#
# Note: This module requires two Bash features which you must not otherwise be
# using: the "DEBUG" trap, and the "PROMPT_COMMAND" variable. If you override
# either of these after bash-preexec has been installed it will most likely break.
# Avoid duplicate inclusion
if [[ "${__bp_imported:-}" == "defined" ]]; then
return 0
fi
__bp_imported="defined"
# Should be available to each precmd and preexec
# functions, should they want it. $? and $_ are available as $? and $_, but
# $PIPESTATUS is available only in a copy, $BP_PIPESTATUS.
# TODO: Figure out how to restore PIPESTATUS before each precmd or preexec
# function.
__bp_last_ret_value="$?"
BP_PIPESTATUS=("${PIPESTATUS[@]}")
__bp_last_argument_prev_command="$_"
__bp_inside_precmd=0
__bp_inside_preexec=0
# Initial PROMPT_COMMAND string that is removed from PROMPT_COMMAND post __bp_install
__bp_install_string=$'__bp_trap_string="$(trap -p DEBUG)"\ntrap - DEBUG\n__bp_install'
# Fails if any of the given variables are readonly
# Reference https://stackoverflow.com/a/4441178
__bp_require_not_readonly() {
local var
for var; do
if ! ( unset "$var" 2> /dev/null ); then
echo "bash-preexec requires write access to ${var}" >&2
return 1
fi
done
}
# Remove ignorespace and or replace ignoreboth from HISTCONTROL
# so we can accurately invoke preexec with a command from our
# history even if it starts with a space.
__bp_adjust_histcontrol() {
local histcontrol
histcontrol="${HISTCONTROL//ignorespace}"
# Replace ignoreboth with ignoredups
if [[ "$histcontrol" == *"ignoreboth"* ]]; then
histcontrol="ignoredups:${histcontrol//ignoreboth}"
fi;
export HISTCONTROL="$histcontrol"
}
# This variable describes whether we are currently in "interactive mode";
# i.e. whether this shell has just executed a prompt and is waiting for user
# input. It documents whether the current command invoked by the trace hook is
# run interactively by the user; it's set immediately after the prompt hook,
# and unset as soon as the trace hook is run.
__bp_preexec_interactive_mode=""
__bp_trim_whitespace() {
local var=$@
var="${var#"${var%%[![:space:]]*}"}" # remove leading whitespace characters
var="${var%"${var##*[![:space:]]}"}" # remove trailing whitespace characters
echo -n "$var"
}
# Returns a copy of the passed in string trimmed of whitespace
# and removes any leading or trailing semi colons.
# Used for manipulating substrings in PROMPT_COMMAND
__bp_sanitize_string() {
local sanitized_string
sanitized_string=$(__bp_trim_whitespace "${1:-}")
sanitized_string=${sanitized_string%;}
sanitized_string=${sanitized_string#;}
sanitized_string=$(__bp_trim_whitespace "$sanitized_string")
echo -n "$sanitized_string"
}
# This function is installed as part of the PROMPT_COMMAND;
# It sets a variable to indicate that the prompt was just displayed,
# to allow the DEBUG trap to know that the next command is likely interactive.
__bp_interactive_mode() {
__bp_preexec_interactive_mode="on";
}
# This function is installed as part of the PROMPT_COMMAND.
# It will invoke any functions defined in the precmd_functions array.
__bp_precmd_invoke_cmd() {
# Save the returned value from our last command, and from each process in
# its pipeline. Note: this MUST be the first thing done in this function.
__bp_last_ret_value="$?" BP_PIPESTATUS=("${PIPESTATUS[@]}")
# Don't invoke precmds if we are inside an execution of an "original
# prompt command" by another precmd execution loop. This avoids infinite
# recursion.
if (( __bp_inside_precmd > 0 )); then
return
fi
local __bp_inside_precmd=1
# Invoke every function defined in our function array.
local precmd_function
for precmd_function in "${precmd_functions[@]}"; do
# Only execute this function if it actually exists.
# Test existence of functions with: declare -[Ff]
if type -t "$precmd_function" 1>/dev/null; then
__bp_set_ret_value "$__bp_last_ret_value" "$__bp_last_argument_prev_command"
# Quote our function invocation to prevent issues with IFS
"$precmd_function"
fi
done
}
# Sets a return value in $?. We may want to get access to the $? variable in our
# precmd functions. This is available for instance in zsh. We can simulate it in bash
# by setting the value here.
__bp_set_ret_value() {
return ${1:-}
}
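# Illustrative (added): "__bp_set_ret_value 42; echo $?" prints 42, which is
# how precmd functions observe the exit status of the user's last command.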
__bp_in_prompt_command() {
local prompt_command_array
IFS=$'\n;' read -rd '' -a prompt_command_array <<< "$PROMPT_COMMAND"
local trimmed_arg
trimmed_arg=$(__bp_trim_whitespace "${1:-}")
local command
for command in "${prompt_command_array[@]:-}"; do
local trimmed_command
trimmed_command=$(__bp_trim_whitespace "$command")
# Only execute each function if it actually exists.
if [[ "$trimmed_command" == "$trimmed_arg" ]]; then
return 0
fi
done
return 1
}
# This function is installed as the DEBUG trap. It is invoked before each
# interactive prompt display. Its purpose is to inspect the current
# environment to attempt to detect if the current command is being invoked
# interactively, and invoke 'preexec' if so.
__bp_preexec_invoke_exec() {
# Save the contents of $_ so that it can be restored later on.
# https://stackoverflow.com/questions/40944532/bash-preserve-in-a-debug-trap#40944702
__bp_last_argument_prev_command="${1:-}"
# Don't invoke preexecs if we are inside of another preexec.
if (( __bp_inside_preexec > 0 )); then
return
fi
local __bp_inside_preexec=1
# Checks if the file descriptor is not standard out (i.e. '1')
# __bp_delay_install checks if we're in test. Needed for bats to run.
# Prevents preexec from being invoked for functions in PS1
if [[ ! -t 1 && -z "${__bp_delay_install:-}" ]]; then
return
fi
if [[ -n "${COMP_LINE:-}" ]]; then
# We're in the middle of a completer. This obviously can't be
# an interactively issued command.
return
fi
if [[ -z "${__bp_preexec_interactive_mode:-}" ]]; then
# We're doing something related to displaying the prompt. Let the
# prompt set the title instead of me.
return
else
# If we're in a subshell, then the prompt won't be re-displayed to put
# us back into interactive mode, so let's not set the variable back.
# In other words, if you have a subshell like
# (sleep 1; sleep 2)
# You want to see the 'sleep 2' as a set_command_title as well.
if [[ 0 -eq "${BASH_SUBSHELL:-}" ]]; then
__bp_preexec_interactive_mode=""
fi
fi
if __bp_in_prompt_command "${BASH_COMMAND:-}"; then
# If we're executing something inside our prompt_command then we don't
# want to call preexec. Bash prior to 3.1 can't detect this at all :/
__bp_preexec_interactive_mode=""
return
fi
local this_command
this_command=$(
export LC_ALL=C
HISTTIMEFORMAT= builtin history 1 | sed '1 s/^ *[0-9][0-9]*[* ] //'
)
# Sanity check to make sure we have something to invoke our function with.
if [[ -z "$this_command" ]]; then
return
fi
# If none of the previous checks have returned out of this function, then
# the command is in fact interactive and we should invoke the user's
# preexec functions.
# Invoke every function defined in our function array.
local preexec_function
local preexec_function_ret_value
local preexec_ret_value=0
for preexec_function in "${preexec_functions[@]:-}"; do
# Only execute each function if it actually exists.
# Test existence of function with: declare -[fF]
if type -t "$preexec_function" 1>/dev/null; then
__bp_set_ret_value ${__bp_last_ret_value:-}
# Quote our function invocation to prevent issues with IFS
"$preexec_function" "$this_command"
preexec_function_ret_value="$?"
if [[ "$preexec_function_ret_value" != 0 ]]; then
preexec_ret_value="$preexec_function_ret_value"
fi
fi
done
# Restore the last argument of the last executed command, and set the return
# value of the DEBUG trap to be the return code of the last preexec function
# to return an error.
# If `extdebug` is enabled a non-zero return value from any preexec function
# will cause the user's command not to execute.
# Run `shopt -s extdebug` to enable
__bp_set_ret_value "$preexec_ret_value" "$__bp_last_argument_prev_command"
}
__bp_install() {
# Exit if we already have this installed.
if [[ "${PROMPT_COMMAND:-}" == *"__bp_precmd_invoke_cmd"* ]]; then
return 1;
fi
trap '__bp_preexec_invoke_exec "$_"' DEBUG
# Preserve any prior DEBUG trap as a preexec function
local prior_trap=$(sed "s/[^']*'\(.*\)'[^']*/\1/" <<<"${__bp_trap_string:-}")
unset __bp_trap_string
if [[ -n "$prior_trap" ]]; then
eval '__bp_original_debug_trap() {
'"$prior_trap"'
}'
preexec_functions+=(__bp_original_debug_trap)
fi
# Adjust our HISTCONTROL Variable if needed.
__bp_adjust_histcontrol
# Issue #25. Setting debug trap for subshells causes sessions to exit for
# backgrounded subshell commands (e.g. (pwd)& ). Believe this is a bug in Bash.
#
# Disabling this by default. It can be enabled by setting this variable.
if [[ -n "${__bp_enable_subshells:-}" ]]; then
# Set so debug trap will work be invoked in subshells.
set -o functrace > /dev/null 2>&1
shopt -s extdebug > /dev/null 2>&1
fi;
local __bp_existing_prompt_command
# Remove setting our trap install string and sanitize the existing prompt command string
__bp_existing_prompt_command="${PROMPT_COMMAND//$__bp_install_string[;$'\n']}" # Edge case of appending to PROMPT_COMMAND
__bp_existing_prompt_command="${__bp_existing_prompt_command//$__bp_install_string}"
__bp_existing_prompt_command=$(__bp_sanitize_string "$__bp_existing_prompt_command")
# Install our hooks in PROMPT_COMMAND to allow our trap to know when we've
# actually entered something.
PROMPT_COMMAND=$'__bp_precmd_invoke_cmd\n'
if [[ -n "$__bp_existing_prompt_command" ]]; then
PROMPT_COMMAND+=${__bp_existing_prompt_command}$'\n'
fi;
PROMPT_COMMAND+='__bp_interactive_mode'
# Add two functions to our arrays for convenience
# of definition.
precmd_functions+=(precmd)
preexec_functions+=(preexec)
# Invoke our two functions manually that were added to $PROMPT_COMMAND
__bp_precmd_invoke_cmd
__bp_interactive_mode
}
# Sets an installation string as part of our PROMPT_COMMAND to install
# after our session has started. This allows bash-preexec to be included
# at any point in our bash profile.
__bp_install_after_session_init() {
# Make sure this is bash that's running this and return otherwise.
if [[ -z "${BASH_VERSION:-}" ]]; then
return 1;
fi
# bash-preexec needs to modify these variables in order to work correctly
# if it can't, just stop the installation
__bp_require_not_readonly PROMPT_COMMAND HISTCONTROL HISTTIMEFORMAT || return
local sanitized_prompt_command
sanitized_prompt_command=$(__bp_sanitize_string "$PROMPT_COMMAND")
if [[ -n "$sanitized_prompt_command" ]]; then
PROMPT_COMMAND=${sanitized_prompt_command}$'\n'
fi;
PROMPT_COMMAND+=${__bp_install_string}
}
# Run our install so long as we're not delaying it.
if [[ -z "${__bp_delay_install:-}" ]]; then
__bp_install_after_session_init
fi;
# -- END BASH-PREEXEC.SH --
}
_install_bash_preexec
unset -f _install_bash_preexec
# -- BEGIN ITERM2 CUSTOMIZATIONS --
# We don't care about whitespace, but users care about not changing their histcontrol variables.
# We overwrite the upstream __bp_adjust_histcontrol function which gets called from the next
# PROMPT_COMMAND invocation.
function __bp_adjust_histcontrol() {
true
}
function iterm2_begin_osc {
printf "\033]"
}
function iterm2_end_osc {
printf "\007"
}
function iterm2_print_state_data() {
iterm2_begin_osc
printf "1337;RemoteHost=%s@%s" "$USER" "$iterm2_hostname"
iterm2_end_osc
iterm2_begin_osc
printf "1337;CurrentDir=%s" "$PWD"
iterm2_end_osc
iterm2_print_user_vars
}
# Usage: iterm2_set_user_var key value
function iterm2_set_user_var() {
iterm2_begin_osc
printf "1337;SetUserVar=%s=%s" "$1" $(printf "%s" "$2" | base64 | tr -d '\n')
iterm2_end_osc
}
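# Example (added; the key/value pair is illustrative):
#   iterm2_set_user_var currentBranch "$(git rev-parse --abbrev-ref HEAD)"
# The value is base64-encoded above, so arbitrary strings are safe to send.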
if [ -z "$(type -t iterm2_print_user_vars)" ] || [ "$(type -t iterm2_print_user_vars)" != function ]; then
# iterm2_print_user_vars is not already defined. Provide a no-op default version.
#
# Users can write their own version of this function. It should call
# iterm2_set_user_var but not produce any other output.
function iterm2_print_user_vars() {
true
}
fi
function iterm2_prompt_prefix() {
iterm2_begin_osc
printf "133;D;\$?"
iterm2_end_osc
}
function iterm2_prompt_mark() {
iterm2_begin_osc
printf "133;A"
iterm2_end_osc
}
function iterm2_prompt_suffix() {
iterm2_begin_osc
printf "133;B"
iterm2_end_osc
}
function iterm2_print_version_number() {
iterm2_begin_osc
printf "1337;ShellIntegrationVersion=16;shell=bash"
iterm2_end_osc
}
# If hostname -f is slow on your system, set iterm2_hostname before sourcing this script.
if [ -z "${iterm2_hostname:-}" ]; then
iterm2_hostname=$(hostname -f 2>/dev/null)
# some flavors of BSD (e.g. NetBSD and OpenBSD) don't have the -f option
if [ $? -ne 0 ]; then
iterm2_hostname=$(hostname)
fi
fi
# Runs after interactively edited command but before execution
__iterm2_preexec() {
# Save the returned value from our last command
__iterm2_last_ret_value="$?"
iterm2_begin_osc
printf "133;C;"
iterm2_end_osc
# If PS1 still has the value we set it to in iterm2_preexec_invoke_cmd then
# restore it to its original value. It might have changed if you have
# another PROMPT_COMMAND (like liquidprompt) that modifies PS1.
if [ -n "${ITERM_ORIG_PS1+xxx}" -a "$PS1" = "$ITERM_PREV_PS1" ]
then
export PS1="$ITERM_ORIG_PS1"
fi
iterm2_ran_preexec="yes"
__bp_set_ret_value "$__iterm2_last_ret_value" "$__bp_last_argument_prev_command"
}
# Prints the current directory and hostname control sequences. Modifies PS1 to
# add the FinalTerm A and B codes to locate the prompt.
function __iterm2_prompt_command () {
__iterm2_last_ret_value="$?"
# Work around a bug in CentOS 7.2 where preexec doesn't run if you press
# ^C while entering a command.
if [[ -z "${iterm2_ran_preexec:-}" ]]
then
__iterm2_preexec ""
fi
iterm2_ran_preexec=""
# This is an iTerm2 addition to try to work around a problem in the
# original preexec.bash.
# When the PS1 has command substitutions, this gets invoked for each
# substitution and each command that's run within the substitution, which
# really adds up. It would be great if we could do something like this at
# the end of this script:
# PS1="$(iterm2_prompt_prefix)$PS1($iterm2_prompt_suffix)"
# and have iterm2_prompt_prefix set a global variable that tells precmd not to
# output anything and have iterm2_prompt_suffix reset that variable.
# Unfortunately, command substitutions run in subshells and can't
# communicate to the outside world.
# Instead, we have this workaround. We save the original value of PS1 in
# $ITERM_ORIG_PS1. Then each time this function is run (it's called from
# PROMPT_COMMAND just before the prompt is shown) it will change PS1 to a
# string without any command substitutions by doing eval on ITERM_ORIG_PS1. At
# this point ITERM_PREEXEC_INTERACTIVE_MODE is still the empty string, so preexec
# won't produce output for command substitutions.
# The first time this is called ITERM_ORIG_PS1 is unset. This tests if the variable
# is undefined (not just empty) and initializes it. We can't initialize this at the
# top of the script because it breaks with liquidprompt. liquidprompt wants to
# set PS1 from a PROMPT_COMMAND that runs just before us. Setting ITERM_ORIG_PS1
# at the top of the script will overwrite liquidprompt's PS1, whose value would
# never make it into ITERM_ORIG_PS1. Issue 4532. It's important to check
# if it's undefined before checking if it's empty because some users have
# bash set to error out on referencing an undefined variable.
if [ -z "${ITERM_ORIG_PS1+xxx}" ]
then
# ITERM_ORIG_PS1 always holds the last user-set value of PS1.
# You only get here on the first time iterm2_preexec_invoke_cmd is called.
export ITERM_ORIG_PS1="$PS1"
fi
# If you want to generate PS1 dynamically from PROMPT_COMMAND, the best way
# to do it is to define a function named iterm2_generate_ps1 that sets PS1.
# Issue 5964. Other shells don't have this issue because they don't need
# such extremes to get precmd and preexec.
if [ -n "$(type -t iterm2_generate_ps1)" ] && [ "$(type -t iterm2_generate_ps1)" = function ]; then
iterm2_generate_ps1
fi
if [[ "$PS1" != "$ITERM_PREV_PS1" ]]
then
export ITERM_ORIG_PS1="$PS1"
fi
# Get the value of the prompt prefix, which will change $?
\local iterm2_prompt_prefix_value="$(iterm2_prompt_prefix)"
# Add the mark unless the prompt includes '$(iterm2_prompt_mark)' as a substring.
if [[ $ITERM_ORIG_PS1 != *'$(iterm2_prompt_mark)'* && x$ITERM2_SQUELCH_MARK = x ]]
then
iterm2_prompt_prefix_value="$iterm2_prompt_prefix_value$(iterm2_prompt_mark)"
fi
# Send escape sequences with current directory and hostname.
iterm2_print_state_data
# Reset $? to its saved value, which might be used in $ITERM_ORIG_PS1.
__bp_set_ret_value "$__iterm2_last_ret_value" "$__bp_last_argument_prev_command"
# Set PS1 to various escape sequences, the user's preferred prompt, and more escape sequences.
export PS1="\[$iterm2_prompt_prefix_value\]$ITERM_ORIG_PS1\[$(iterm2_prompt_suffix)\]"
# Save the value we just set PS1 to so if the user changes PS1 we'll know and we can update ITERM_ORIG_PS1.
export ITERM_PREV_PS1="$PS1"
__bp_set_ret_value "$__iterm2_last_ret_value" "$__bp_last_argument_prev_command"
}
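# Example (hypothetical): to control mark placement yourself, embed the mark
# in PS1 and suppress the automatically prepended one:
#   ITERM2_SQUELCH_MARK=1
#   PS1='$(iterm2_prompt_mark)\u@\h \w\$ '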
# Install my function
preexec_functions+=(__iterm2_preexec)
iterm2_print_state_data
iterm2_print_version_number
fi
# -- END ITERM2 CUSTOMIZATIONS --
|
weichuliu/dots
|
iterm2_shell_integration.bash
|
Shell
|
mit
| 23,765 |
#16x16 has to be done manually
sizesNoAA="20 32"
sizesAA="40 44 46 50 64 128 150 256 512"
names=""
for i in $sizesNoAA
do
convert +antialias -background none plug.svg -filter Lanczos -resize ${i}x${i} png8:icon_${i}x${i}.png
names=$names" icon_"${i}x${i}".png"
done
for i in $sizesAA
do
convert +antialias -background none plug.svg -resize ${i}x${i} icon_${i}x${i}.png
names=$names" icon_"${i}x${i}".png"
done
names=$names" icon_16x16.png"
convert $names Tray.ico
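# A possible way (an assumption, not necessarily the author's workflow) to
# produce the hand-tuned 16x16 icon referenced at the top of this script:
#   convert +antialias -background none plug.svg -resize 16x16 png8:icon_16x16.png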
|
petrroll/PowerSwitcher
|
PowerSwitcher.Assets/create_icon.sh
|
Shell
|
mit
| 488 |
#!/bin/bash
APP=pybitmessage
PREV_VERSION=0.4.2
VERSION=0.4.2
RELEASE=1
SOURCEDIR=.
ARCH_TYPE=`uname -m`
CURRDIR=`pwd`
SOURCE=~/rpmbuild/SOURCES/${APP}_${VERSION}.orig.tar.gz
# Update version numbers automatically - so you don't have to
sed -i 's/VERSION='${PREV_VERSION}'/VERSION='${VERSION}'/g' Makefile debian.sh arch.sh puppy.sh ebuild.sh slack.sh
sed -i 's/Version: '${PREV_VERSION}'/Version: '${VERSION}'/g' rpmpackage/${APP}.spec
sed -i 's/Release: '${RELEASE}'/Release: '${RELEASE}'/g' rpmpackage/${APP}.spec
sed -i 's/pkgrel='${RELEASE}'/pkgrel='${RELEASE}'/g' archpackage/PKGBUILD
sed -i 's/pkgver='${PREV_VERSION}'/pkgver='${VERSION}'/g' archpackage/PKGBUILD
sed -i "s/-${PREV_VERSION}-/-${VERSION}-/g" puppypackage/*.specs
sed -i "s/|${PREV_VERSION}|/|${VERSION}|/g" puppypackage/*.specs
sed -i 's/VERSION='${PREV_VERSION}'/VERSION='${VERSION}'/g' puppypackage/pinstall.sh puppypackage/puninstall.sh
sed -i 's/-'${PREV_VERSION}'.so/-'${VERSION}'.so/g' debian/*.links
sudo yum groupinstall "Development Tools"
sudo yum install rpmdevtools
# Setup the rpmbuild directory tree
rpmdev-setuptree
# Create the source code in the SOURCES directory
make clean
mkdir -p ~/rpmbuild/SOURCES
rm -f ${SOURCE}
# Having the root directory called name-version seems essential
mv ../${APP} ../${APP}-${VERSION}
tar -cvzf ${SOURCE} ../${APP}-${VERSION} --exclude-vcs
# Rename the root directory without the version number
mv ../${APP}-${VERSION} ../${APP}
# Copy the spec file into the SPECS directory
cp -f rpmpackage/${APP}.spec ~/rpmbuild/SPECS
# Build
cd ~/rpmbuild/SPECS
rpmbuild -ba ${APP}.spec
cd ${CURRDIR}
# Copy the results into the rpmpackage directory
mkdir -p rpmpackage/${ARCH_TYPE}
cp -r ~/rpmbuild/RPMS/${ARCH_TYPE}/${APP}* rpmpackage/${ARCH_TYPE}
cp -r ~/rpmbuild/SRPMS/${APP}* rpmpackage
|
domob1812/PyBitmessage
|
rpm.sh
|
Shell
|
mit
| 1,811 |
# F1 main
echo "Running Baseline experiment"
python validation.py --interpro --pfam --cc --bp --mf --binary --verbose --model=LogisticRegression --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5 --output_folder=baseline
echo "Running Random Forest experiment"
python validation.py --interpro --pfam --cc --bp --mf --binary --verbose --model=RandomForestClassifier --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5 --output_folder=randomforest
echo "Running Ternary encoding experiment"
python validation.py --interpro --pfam --cc --bp --mf --verbose --model=LogisticRegression --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5 --output_folder=ternary
echo "Running Inducer experiment"
python validation.py --interpro --pfam --cc --bp --mf --binary --verbose --induce --model=LogisticRegression --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5 --output_folder=inducer
# Individual
echo "Running isolated GO features experiment"
python validation.py --cc --bp --mf --binary --verbose --model=LogisticRegression --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5 --output_folder=isolated_go
echo "Running isolated induced GO features experiment"
python validation.py --cc --bp --mf --binary --induce --verbose --model=LogisticRegression --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5 --output_folder=isolated_inducer
echo "Running isolated InterPro features experiment"
python validation.py --interpro --binary --model=LogisticRegression --verbose --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5 --output_folder=isolated_interpro
echo "Running isolated Pfam features experiment"
python validation.py --pfam --binary --model=LogisticRegression --verbose --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5 --output_folder=isolated_pfam
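# A minimal sketch (not part of the original script) showing how the four
# "F1 main" runs above share flags; the associative array mirrors the exact
# options used in each invocation:
#   COMMON="--interpro --pfam --cc --bp --mf --verbose --n_jobs=4 --h_iterations=30 --n_iterations=3 --n_splits=5"
#   declare -A extra=(
#     [baseline]="--binary --model=LogisticRegression"
#     [randomforest]="--binary --model=RandomForestClassifier"
#     [ternary]="--model=LogisticRegression"
#     [inducer]="--binary --induce --model=LogisticRegression"
#   )
#   for folder in "${!extra[@]}"; do
#     python validation.py $COMMON ${extra[$folder]} --output_folder=$folder
#   done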
|
daniaki/pyPPI
|
scripts/run_val_exp.sh
|
Shell
|
mit
| 1,814 |
sudo apt-get install -y curl software-properties-common build-essential
curl -sL https://deb.nodesource.com/setup_6.x | sudo bash -
sudo apt-get install -y nodejs
|
krandalf75/linux-utils
|
debian9/nodejs.sh
|
Shell
|
mit
| 165 |
#!/bin/bash
set -e
echo "Building application"
go list ./... | grep -v /vendor/ | grep -v bindata_assetfs.go | xargs -L1 go vet
go fmt ./...
go test ./... --cover
go install
|
quii/mockingjay-server
|
build.sh
|
Shell
|
mit
| 258 |
## [name]
## Alias of "run".
@ACTIONS.run "$@"
|
reduardo7/docker-dev-base
|
src/actions/start.sh
|
Shell
|
mit
| 47 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code sign identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
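# Illustration (hypothetical output; not part of the original script): for a
# fat binary, the `lipo -info` call above yields something like
#   Architectures in the fat file: Foo.framework/Foo are: i386 x86_64 armv7 arm64
# so with VALID_ARCHS="armv7 arm64" the loop strips i386 and x86_64 in place.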
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-11_MagicalRecord/MagicalRecord.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-11_MagicalRecord/MagicalRecord.framework"
fi
|
iosprogrammingwithswift/iosprogrammingwithswift
|
18_MagicalRecord/Pods/Target Support Files/Pods-11_MagicalRecord/Pods-11_MagicalRecord-frameworks.sh
|
Shell
|
mit
| 3,562 |
#!/usr/bin/env bash
set -o errexit
set -o pipefail
set -o nounset
# set -o xtrace
if [[ $# -eq 0 ]] ; then
echo "Please supply a version, e.g. 'v3'"
exit 1
fi
# This script uploads docs to a specified archive version.
SPATH="/archive/$1"
aws configure set preview.cloudfront true
aws --profile mitmproxy \
s3 sync --acl public-read ./public s3://docs.mitmproxy.org$SPATH
aws --profile mitmproxy \
cloudfront create-invalidation --distribution-id E1TH3USJHFQZ5Q \
--paths "$SPATH/*"
|
vhaupert/mitmproxy
|
docs/upload-archive.sh
|
Shell
|
mit
| 507 |
$INSTANS_HOME/bin/instans --prefix-encoding=true -r queries/construct-event-output.rq -r queries/EPA-All.rq --input-blocks=data/CEP2SPARQL_SamplePattern.trig --allow-rule-instance-removal=false --rdf-operations=add:execute-snapshot:remove:execute:flush --time=- --input-blocks=data/CEP2SPARQL_SampleEvents.trig
|
aaltodsg/instans-cep2sparql
|
cep2sparql.sh
|
Shell
|
mit
| 311 |
source ./library-symlinks.sh
symlink_lib 'client' 'event_store'
|
obsidian-btc/event-store-client
|
symlink-lib.sh
|
Shell
|
mit
| 65 |
#!/bin/bash
gcc test.c
./a.out | head -c 4000000 > output.raw
sox -r 24000 -c 1 -t u8 output.raw output.wav
open output.wav
|
cpmpercussion/evobytebeat
|
makeoutput.sh
|
Shell
|
mit
| 125 |
#!/bin/sh
# NOT USING THIS FILE ANYMORE
#what pin are we interested in?
PIN="$1"
#setup
/usr/local/bin/gpio mode $PIN in
/usr/local/bin/gpio mode $PIN down
#first...
#echo `/usr/local/bin/gpio read $PIN` > /tmp/gpio$PIN
#loop
LAST=-1
while true; do
VAL=`/usr/local/bin/gpio read $PIN`
if [ "$VAL" != "$LAST" ]
then
echo "$VAL"
fi
LAST="$VAL"
sleep 0.5
done
|
albennett/magic-mirror-pi
|
gpio/readNode.sh
|
Shell
|
mit
| 378 |
config() {
NEW="$1"
OLD="$(dirname $NEW)/$(basename $NEW .new)"
# If there's no config file by that name, mv it over:
if [ ! -r $OLD ]; then
mv $NEW $OLD
elif [ "$(cat $OLD | md5sum)" = "$(cat $NEW | md5sum)" ]; then
# toss the redundant copy
rm $NEW
fi
# Otherwise, we leave the .new copy for the admin to consider...
}
config etc/pydfrc.new
|
panosmdma/SlackOnly-SlackBuilds
|
python/pydf/doinst.sh
|
Shell
|
mit
| 370 |
export APP_HOME=..
TAG=EFW_L3_B
export LD_LIBRARY_PATH=$APP_HOME:$LD_LIBRARY_PATH
export LUA_PATH=$APP_HOME/?.lua
export LUA_CPATH=$APP_HOME/bin/?.dll
export IN_FOLDER=$APP_HOME/test/in
export OUT_FOLDER=$APP_HOME/test/out
export C3_CP_EFW_L3_E=$IN_FOLDER/C3_CP_EFW_L3_E__20020808_000000_20020808_060000_V110505.cef
export C3_CP_AUX_POSGSE_1M=$IN_FOLDER/C3_CP_AUX_POSGSE_1M__20020808_000000_20020808_060000_V091203.cef
export C3_CP_FGM_FULL=$IN_FOLDER/C3_CP_FGM_FULL__20020808_000000_20020808_060000_V060624.cef
export CL_SP_AUX=$IN_FOLDER/CL_SP_AUX__20020808_000000_20020808_060000_V061128.cef
NOW=$(date +"%Y%m%d-%H%M%S")
#export OUT_FOLDER_NOW=$OUT_FOLDER/$TAG'_'$NOW
export OUT_FOLDER_NOW=$OUT_FOLDER/$NOW'_'$TAG
mkdir $OUT_FOLDER_NOW
cd ../bin
./luajit ./efw_L3_B.lua $C3_CP_EFW_L3_E $C3_CP_AUX_POSGSE_1M $C3_CP_FGM_FULL $CL_SP_AUX $OUT_FOLDER_NOW
|
caa-dev-apps/libcef_v2
|
app/test/efw_L3_B.sh
|
Shell
|
mit
| 871 |
#!/bin/bash
if [ -z "$CATALINA_HOME" ]; then
echo "CATALINA_HOME must be specified"
exit 1
fi
echo "CATALINA_HOME=[$CATALINA_HOME]"
sh "$CATALINA_HOME/bin/shutdown.sh"
rm -rf "$CATALINA_HOME/webapps/kuzoff-ws"
rm "$CATALINA_HOME/webapps/kuzoff-ws.war"
cp build/libs/kuzoff-ws.war "$CATALINA_HOME/webapps"
|
cyber-waste/kuzoff
|
webservice-server/src/build/shell/deploy.sh
|
Shell
|
mit
| 316 |
#!/bin/sh
echo "Please install Xcode from:"
open "https://itunes.apple.com/us/app/xcode/id497799835?ls=1&mt=12"
read -p "Press enter to continue"
echo "Please install Sunrise Calendar from:"
open "https://itunes.apple.com/us/app/sunrise-calendar/id886106985?ls=1&mt=12"
read -p "Press enter to continue"
# Install Homebrew popular OS X package manager
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
ln -s "$(brew --prefix)/Library/Contributions/brew_zsh_completion.zsh" /usr/local/share/zsh/site-functions
/usr/local/bin/brew install vcsh mr
# Pull dotfiles
vcsh clone https://github.com/pftg/dotfiles.git
cd ~/.dotfiles/osx-bootstrap/
xcode-select --install
sudo xcodebuild -license
# Setup Homebrew
brew tap Homebrew/bundle
brew bundle
# Install compiled phantomjs 1.9.8
brew cask install https://raw.githubusercontent.com/caskroom/homebrew-cask/b234ca9329525eca21012c76a5bc2b69a9d15a8d/Casks/phantomjs.rb
# Install compiled phantomjs 2.0.0
brew cask install phantomjs
/usr/local/bin/brew install zsh
chsh -s /usr/local/bin/zsh pftg
# Setup ZSH
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# Setup Log rotation
ln -s `realpath ./etc/newsyslog.d/io.pftg.dev.conf` /etc/newsyslog.d/io.pftg.dev.conf
# SSD tricks
source osx-noatime.sh
source osx-ram-disk-for-tmp.sh
# Setup OSX defaults
cd ~/.dotfiles/osx-bootstrap/
git clone [email protected]:pftg/mathiasbynens-dotfiles.git
cd mathiasbynens-dotfiles
source .osx
|
pftg/dotfiles
|
.dotfiles/osx-bootstrap/osx-bootstrap.sh
|
Shell
|
mit
| 1,523 |
#!/usr/bin/env bash
set -e
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
echo "Checking .env file..."
if [ ! -e "$ROOT"/docker/.env ]; then
echo "No docker/.env file found. Creating from .env.example..."
cp "$ROOT"/docker/.env.example "$ROOT"/docker/.env
echo "NOTE: Please review the docker/.env file and configure any required parameters."
else
echo "docker/.env file exists. Skipping create."
echo "NOTE: Please review the docker/.env.example file and ensure all required parameters are present in docker/.env"
fi
|
Medology/FlexibleMink
|
bin/init/init_docker_env.sh
|
Shell
|
mit
| 547 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2015:1893
#
# Security announcement date: 2015-10-15 11:55:57 UTC
# Script generation date: 2017-01-11 21:26:15 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - flash-plugin.i686:11.2.202.535-1.el6_7
#
# Last versions recommended by the security team:
# - flash-plugin.i686:24.0.0.194-1.el6_8
#
# CVE List:
# - CVE-2015-5569
# - CVE-2015-7625
# - CVE-2015-7626
# - CVE-2015-7627
# - CVE-2015-7628
# - CVE-2015-7629
# - CVE-2015-7630
# - CVE-2015-7631
# - CVE-2015-7632
# - CVE-2015-7633
# - CVE-2015-7634
# - CVE-2015-7643
# - CVE-2015-7644
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install flash-plugin.i686-24.0.0.194 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2015/RHSA-2015:1893.sh
|
Shell
|
mit
| 868 |
#!/bin/bash
pip install -r '../requirements.txt'
|
joebowen/movement_validation_cloud
|
scripts/install_dependencies.sh
|
Shell
|
mit
| 51 |
#!/bin/bash
PARA_WATCHER_FILE='/data/nginxhtml/cgi_bin_security/nohup.out'
PRAR_PROC_NAME='spawn-fcgi-watcher'
PRAR_PID_FILE='/data/nginxhtml/cgi_bin_security/server.pid'
PARA_HOST_ID=""
PARA_SERVER_ID="cgi_bin_security"
CMD_GREP='/bin/grep'
CMD_AWK='/usr/bin/awk'
CMD_SED='/bin/sed'
cd `dirname $0`
SUB_PID=`ps -eo pid,args | "$CMD_GREP" -v grep | "$CMD_GREP" $PRAR_PROC_NAME | "$CMD_GREP" $PRAR_PID_FILE | "$CMD_AWK" '{print $1}'`
if [ -z "$SUB_PID" ];then
echo server not running.
exit 1
fi
tail --lines=0 --max-unchanged-stats=5 --pid="$SUB_PID" --sleep-interval=60 -F "$PARA_WATCHER_FILE" | \
while read SUB_LINE;
do
SUB_INFO=`echo -n $SUB_LINE | "$CMD_GREP" 'unusually'`
if [ -n "$SUB_INFO" ];then
./sendmail.sh 3 "_SecurityCenter-CORE-Log" "ERROR : [`date +%Y-%m-%d_%H:%M:%S`] : ($PARA_HOST_ID:$PARA_SERVER_ID)$SUB_LINE"
fi
# SUB_INFO=`echo -n $SUB_LINE | "$CMD_GREP" 'child spawned successfully' | $CMD_AWK -FPID: '{print $2}' | bc 2>/dev/null`
# if [ -n "$SUB_INFO" ] && [ "$SUB_INFO" -gt 0 ];then
# ./ErrMonitor.sh "$SUB_INFO" &
# fi
done
|
switch-st/spawn-fcgi-watcher
|
script/Monitor/ProcMonitor.sh
|
Shell
|
mit
| 1,063 |
#!/bin/bash
echo "===== started mh_db_auto_backup.sh ======"
source /var/www/mh-hunt-helper/DB/config.sh
cd /keybase/public/devjacksmith/mh_backups/weekly
date > last_updated.txt
# Hunt Helper
echo "====== Backing up hunt helper ====="
if [ -f hunthelper_weekly.sql.gz ]; then
rm hunthelper_weekly.sql.gz
fi
if [ -f hunthelper_weekly.txt.zip ]; then
rm hunthelper_weekly.txt.zip
fi
echo "=== Turning off even scheduler ==="
mysql -u $MH_USER -p$MH_PASS -e "SET GLOBAL event_scheduler = OFF;"
mysqldump -u $MH_USER -p$MH_PASS --host=127.0.0.1 --skip-lock-tables --events --routines mhhunthelper | gzip -9 > hunthelper_weekly.sql.gz
sleep 5s
rm -rf /var/lib/mysql-files/*
mysqldump -u $MH_USER -p$MH_PASS --host=127.0.0.1 --skip-lock-tables --events --routines -T /var/lib/mysql-files/ --no-create-info --compatible=db2 mhhunthelper
rm -rf /var/lib/mysql-files/*.sql
zip -j -9 hunthelper_weekly.txt.zip /var/lib/mysql-files/*
rm -rf /var/lib/mysql-files/*
# Map Spotter
echo "===== Backing up map spotter ====="
if [ -f mapspotter_weekly.sql.gz ]; then
rm mapspotter_weekly.sql.gz
fi
if [ -f mapspotter_weekly.txt.zip ]; then
rm mapspotter_weekly.txt.zip
fi
mysqldump -u $MH_USER -p$MH_PASS --host=127.0.0.1 --skip-lock-tables --ignore-table=mhmapspotter.fb_users --ignore-table=mhmapspotter.fb_groups --events --routines mhmapspotter | gzip -9 > mapspotter_weekly.sql.gz
sleep 5s
rm -rf /var/lib/mysql-files/*
mysqldump -u $MH_USER -p$MH_PASS --host=127.0.0.1 --skip-lock-tables --ignore-table=mhmapspotter.fb_users --ignore-table=mhmapspotter.fb_groups --events --routines -T /var/lib/mysql-files/ --no-create-info --compatible=db2 mhmapspotter
rm -rf /var/lib/mysql-files/*.sql
zip -j -9 mapspotter_weekly.txt.zip /var/lib/mysql-files/*
rm -rf /var/lib/mysql-files/*
# Converter
echo "===== Backing up converter ====="
if [ -f converter_weekly.sql.gz ]; then
rm converter_weekly.sql.gz
fi
if [ -f converter_weekly.txt.zip ]; then
rm converter_weekly.txt.zip
fi
mysqldump -u $MH_USER -p$MH_PASS --host=127.0.0.1 --skip-lock-tables --events --routines mhconverter --ignore-table=mhconverter.entries | gzip -9 > converter_weekly.sql.gz
sleep 5s
rm -rf /var/lib/mysql-files/*
mysqldump -u $MH_USER -p$MH_PASS --host=127.0.0.1 --skip-lock-tables --events --routines -T /var/lib/mysql-files/ --no-create-info --compatible=db2 mhconverter --ignore-table=mhconverter.entries
rm -rf /var/lib/mysql-files/*.sql
zip -j -9 converter_weekly.txt.zip /var/lib/mysql-files/*
rm -rf /var/lib/mysql-files/*
# Map Helper
echo "===== Backing up map helper ====="
if [ -f maphelper_weekly.sql.gz ]; then
rm maphelper_weekly.sql.gz
fi
if [ -f maphelper_weekly.txt.zip ]; then
rm maphelper_weekly.txt.zip
fi
mysqldump -u $MH_USER -p$MH_PASS --host=127.0.0.1 --skip-lock-tables --events --routines mhmaphelper --ignore-table=mhmaphelper.users | gzip -9 > maphelper_weekly.sql.gz
sleep 5s
rm -rf /var/lib/mysql-files/*
mysqldump -u $MH_USER -p$MH_PASS --host=127.0.0.1 --skip-lock-tables --events --routines -T /var/lib/mysql-files/ --no-create-info --compatible=db2 mhmaphelper --ignore-table=mhmaphelper.users
rm -rf /var/lib/mysql-files/*.sql
zip -j -9 maphelper_weekly.txt.zip /var/lib/mysql-files/*
rm -rf /var/lib/mysql-files/*
echo "=== Turning on even scheduler ==="
mysql -u $MH_USER -p$MH_PASS -e "SET GLOBAL event_scheduler = ON;"
echo "===== finished mh_db_auto_backup.sh ====="
|
DevJackSmith/mh-hunt-helper
|
DB/mh_db_auto_backup.sh
|
Shell
|
mit
| 3,430 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3420-1
#
# Security announcement date: 2015-12-15 00:00:00 UTC
# Script generation date: 2017-01-29 21:06:15 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - bind9:1:9.8.4.dfsg.P1-6+nmu2+deb7u8
#
# Last versions recommended by the security team:
# - bind9:1:9.8.4.dfsg.P1-6+nmu2+deb7u14
#
# CVE List:
# - CVE-2015-8000
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade bind9=1:9.8.4.dfsg.P1-6+nmu2+deb7u14 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/i386/2015/DSA-3420-1.sh
|
Shell
|
mit
| 653 |
#!/bin/bash
set -ex
sudo dnf install -y @c-development cmake pkgconfig gcc-c++ \
{wxGTK3,libnova,zlib,libusb}{,-devel}
OPENPHD_VER=2.6.5
prefix=/opt/phd2
cd $(mktemp -d)
curl -L https://github.com/OpenPHDGuiding/phd2/archive/v${OPENPHD_VER}.tar.gz | tar xzf -
cd phd2-${OPENPHD_VER}
mkdir -p tmp && cd tmp
cmake -DCMAKE_INSTALL_PREFIX="$prefix" ..
# do not add -j4 here -- may cause compilation conflicts
make
read -p 'press ENTER to install PHD2'
sudo make install
sudo dnf remove {wxGTK3,libnova,zlib,libusb}-devel || echo nope
|
gronki/fedora-setup
|
build-scripts/build-phd2.sh
|
Shell
|
mit
| 540 |
#!/bin/bash
set -e
script_dir="$( cd "$( dirname "$0" )" && pwd )"
cd "$script_dir"
if [[ -z $PREFIX ]]; then
PREFIX="$1"
[[ -z $PREFIX ]] && PREFIX="/usr/local"
fi
if [[ -z $CONFIG_DIR ]]; then
CONFIG_DIR="$2"
[[ -z $CONFIG_DIR ]] && CONFIG_DIR="/etc"
fi
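# Example (hypothetical) invocations; environment variables take precedence
# over the positional arguments, and both fall back to the defaults above:
#   ./install.sh /usr /etc
#   PREFIX=/usr CONFIG_DIR=/etc ./install.sh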
mkdir -p "$PREFIX/bin"
cp "$script_dir/clr-ucode-updater.sh" "$PREFIX/bin/clr-ucode-updater.sh"
sed -i "s|__etc|$CONFIG_DIR|g" "$PREFIX/bin/clr-ucode-updater.sh"
chmod 755 "$PREFIX/bin/clr-ucode-updater.sh"
mkdir -p "$CONFIG_DIR"
if [[ ! -f "$CONFIG_DIR/clr-ucode-updater.cfg" ]]; then
cp "$script_dir/clr-ucode-updater.cfg" "$CONFIG_DIR/clr-ucode-updater.cfg"
chmod 644 "$CONFIG_DIR/clr-ucode-updater.cfg"
fi
|
DarkCaster/Linux-Helper-Tools
|
ClearLinuxUcodeUpdater/install.sh
|
Shell
|
mit
| 684 |
#!/bin/bash
set -ex
cd ${0%/*}/../
ansible-playbook tests/test.yml -i tests/inventory --syntax-check
|
ssato/ansible-playbooks
|
roles/kvm/tests/run.sh
|
Shell
|
mit
| 102 |
# Get access token
AT=`curl -s -X POST -H 'content-type: application/x-www-form-urlencoded' 'https://api.mercadopago.com/oauth/token' -d 'grant_type=client_credentials' -d 'client_id=CLIENT_ID' -d 'client_secret=CLIENT_SECRET' | grep -o '"access_token":"[^"]*"' | sed -n 's/.*"access_token":"\(.*\)"/\1/p'`
curl -X PUT \
-H "Content-Type: application/json" \
"https://api.mercadopago.com/collections/:ID?access_token=$AT" \
-d '{"status":"cancelled"}'
|
matikbird/matikbird.github.io
|
portfolio/quay/back_end/payments2/mercadopago/api-mercadopago-master/templates/code-examples-master/refund-and-cancellation/bash/cancel.sh
|
Shell
|
mit
| 452 |
#!/bin/bash
export PYTHONPATH="./pypy-source"
# Make with -O0:
# pypy pypy-source/rpython/bin/rpython -O0 --gc=hybrid dipper.py
# Make with -O2:
pypy pypy-source/rpython/bin/rpython dipper.py
|
juddc/Dipper
|
make.sh
|
Shell
|
mit
| 195 |
#!/bin/bash
echo "Generating trevecca.html ..."
./trevecca.py > trevecca.html
echo "Generating trevecca_wkhtmltopdf.pdf ..."
wkhtmltopdf --footer-right "[page]" --dump-outline trevecca_wkhtmltopdf_toc.xml trevecca.html trevecca_wkhtmltopdf.pdf
echo "Generating wkhtmltopdf_toc.xsl ..."
wkhtmltopdf --dump-default-toc-xsl > wkhtmltopdf_toc.xsl
echo "Generating trevecca_wkhtmltopdf_toc.html ..."
xsltproc wkhtmltopdf_toc.xsl trevecca_wkhtmltopdf_toc.xml > trevecca_wkhtmltopdf_toc.html
echo "Generating trevecca_wkhtmltopdf_toc.pdf ..."
wkhtmltopdf trevecca_wkhtmltopdf_toc.html trevecca_wkhtmltopdf_toc.pdf
echo "Generating trevecca_wkhtmltopdf_with_TOC.pdf ..."
/System/Library/Automator/Combine\ PDF\ Pages.action/Contents/Resources/join.py --output trevecca_wkhtmltopdf_with_TOC.pdf trevecca_wkhtmltopdf_toc.pdf trevecca_wkhtmltopdf.pdf
echo "Done."
|
welcheb/trevecca_undergraduate_catalog_2015-16
|
trevecca.sh
|
Shell
|
mit
| 861 |
#! /bin/bash
#
# Simple rmdup loop
#
# sambamba_rmdup.sh < bamlist
#
# Example
#
# ls -1 --color=never *.bam | grep -v _rmdup | /hpc/local/CentOS6/cog_bioinf/CuppenResearch/somatic_pipeline/scripts/sambamba_rmdup.sh
sambamba=sambamba
onceonly=$HOME/izip/git/opensource/ruby/once-only/bin/once-only
while read bam ; do
echo ==== $sambamba remove duplicates $bam...
outfn=$(basename $bam .bam)_rmdup.bam
# echo "$sambamba markdup -r $bam $outfn"| $onceonly --pfff --pbs '-q veryshort' -d . -v -in $bam --out $outfn
echo "$sambamba markdup -r $bam $outfn"| $onceonly -v --pfff -d . -in $bam --out $outfn
[ $? -ne 0 ] && exit 1
done
|
CuppenResearch/somatic_pipeline
|
scripts/sambamba_rmdup.sh
|
Shell
|
mit
| 647 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-425-1
#
# Security announcement date: 2016-02-23 00:00:00 UTC
# Script generation date: 2017-01-01 21:09:08 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - libssh:0.4.5-3+squeeze3
#
# Last versions recommended by the security team:
# - libssh:0.4.5-3+squeeze3
#
# CVE List:
# - CVE-2016-0739
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libssh=0.4.5-3+squeeze3 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/i386/2016/DLA-425-1.sh
|
Shell
|
mit
| 615 |
#!/bin/bash
[ "$(_get_tmux_major_version)" -eq "1" ]
|
eviljoe/junk-n-stuff
|
src/tmuxlib/_is_tmux_v1.sh
|
Shell
|
mit
| 55 |
if [ -z "$KALS_BRANCH" ]; then KALS_BRANCH=voc4fun-server/master; fi
if [ -z "$KALS_PATH" ]; then KALS_PATH=/var/www/voc4fun-server; fi
if [ -z "$KALS_DIR" ]; then KALS_DIR=/var/www; fi
#echo $KALS_DIR
#cd $KALS_DIR
git clone git://github.com/pulipulichen/voc4fun-server.git "$KALS_PATH"
cd "$KALS_PATH"
git reset --hard origin/master
|
pulipulichen/voc4fun-server
|
git-scripts/clone.sh
|
Shell
|
mit
| 320 |
#!/bin/bash
# wget https://github.com/python/cpython/archive/3.5.zip -O python.zip
# find cpython-3.5/Doc/ -name "*.rst" | grep -v whatsnew | xargs -I{} cat "{}" | perl -ne 'm{method::\s*([^\(\)]*)\s*} && print "$1\n";'
|
wsdookadr/news
|
kwtech/python.sh
|
Shell
|
mit
| 220 |
#!/bin/bash
START=$(date)
export OMP_NUM_THREADS=8
time ./compute_projection_correction.run ../config/DLA_DR9_bin2Mpc_80Mpc_sampleC2_cosmoAndreu.ini
time ./compute_lya1d.run ../config/DLA_DR9_bin2Mpc_80Mpc_sampleC2_cosmoAndreu.ini
time ./correlation.run ../config/DLA_DR9_bin2Mpc_80Mpc_sampleC2_cosmoAndreu.ini
echo "start: $START"
echo "end:   $(date)"
|
iprafols/cross_correlations
|
programs/DLA_DR9_bin2Mpc_80Mpc_sampleC2_cosmoAndreu.sh
|
Shell
|
mit
| 358 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3185-1
#
# Security announcement date: 2017-02-01 00:00:00 UTC
# Script generation date: 2017-02-03 21:04:22 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - libxpm4:1:3.5.9-4ubuntu0.1
# - libxpm4-dbg:1:3.5.9-4ubuntu0.1
# - libxpm-dev:1:3.5.9-4ubuntu0.1
#
# Last versions recommended by the security team:
# - libxpm4:1:3.5.9-4ubuntu0.1
# - libxpm4-dbg:1:3.5.9-4ubuntu0.1
# - libxpm-dev:1:3.5.9-4ubuntu0.1
#
# CVE List:
# - CVE-2016-10164
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libxpm4=1:3.5.9-4ubuntu0.1 -y
sudo apt-get install --only-upgrade libxpm4-dbg=1:3.5.9-4ubuntu0.1 -y
sudo apt-get install --only-upgrade libxpm-dev=1:3.5.9-4ubuntu0.1 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/x86_64/2017/USN-3185-1.sh
|
Shell
|
mit
| 911 |
#!/bin/ksh
isql -Ubrass -Pascasc -SDB_NITSYBUNIV1 -w200 <<SQLDONE >order_info.lis
select entdate,convert(char(8),dateadd(ss,abs(crttime),'12/31/69 20:00'),8) 'Entry Time',nasdstat,bors, secsym,abs(qty) 'quantity',price,bidprice,askprice,status,type,bkrsym
from openorder
where entdate >= '08/01/00' and entdate <= '09/18/00'
union all
select entdate,convert(char(8),dateadd(ss,abs(crttime),'12/31/69 20:00'),8),nasdstat,bors, secsym,abs(qty),price,bidprice,askprice,status,type,bkrsym
from orders00_1..purgedopenorder
where entdate >= '08/01/00' and entdate <= '09/18/00'
union all
select entdate,convert(char(8),dateadd(ss,abs(crttime),'12/31/69 20:00'),8),nasdstat,bors, secsym,abs(qty),price,bidprice,askprice,status,type,bkrsym
from orders..purgedopenorder
where entdate >= '08/01/00' and entdate <= '09/18/00'
order by entdate,convert(char(8),dateadd(ss,abs(crttime),'12/31/69 20:00'),8)
go
quit
SQLDONE
|
pitpitman/GraduateWork
|
Knight/order_info.sh
|
Shell
|
mit
| 912 |
export SECRET_KEY="some_secretkey"
export SERVER_NAME="dev.gunlinux.org"
export EXTRA_DEBUG="True"
export PORT=7777
|
gunlinux/gunlinux.org
|
config.example.sh
|
Shell
|
mit
| 115 |
#!/usr/bin/env bash
declare -A params=$6 # Create an associative array
declare -A headers=${9} # Create an associative array
declare -A rewrites=${10} # Create an associative array
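# Illustration (hypothetical call): $6, $9 and ${10} are expected to arrive as
# strings in bash associative-array literal form, e.g.
#   "([FOO]=bar [BAZ]=qux)"
# so that `declare -A params=$6` expands them into key/value pairs.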
paramsTXT=""
if [ -n "$6" ]; then
for element in "${!params[@]}"
do
paramsTXT="${paramsTXT}
fastcgi_param ${element} ${params[$element]};"
done
fi
headersTXT=""
if [ -n "${9}" ]; then
for element in "${!headers[@]}"
do
headersTXT="${headersTXT}
add_header ${element} ${headers[$element]};"
done
fi
rewritesTXT=""
if [ -n "${10}" ]; then
for element in "${!rewrites[@]}"
do
rewritesTXT="${rewritesTXT}
location ~ ${element} { if (!-f \$request_filename) { return 301 ${rewrites[$element]}; } }"
done
fi
if [ "$7" = "true" ]
then configureXhgui="
location /xhgui {
try_files \$uri \$uri/ /xhgui/index.php?\$args;
}
"
else configureXhgui=""
fi
block="server {
listen ${3:-80};
listen ${4:-443} ssl http2;
server_name $1;
root \"$2\";
charset utf-8;
client_max_body_size 100M;
if (\$http_x_forwarded_host) {
return 400;
}
$rewritesTXT
location / {
try_files \$uri /index.php?url=\$uri&\$query_string;
$headersTXT
}
error_page 404 /assets/error-404.html;
error_page 500 /assets/error-500.html;
access_log off;
error_log /var/log/nginx/$1-error.log error;
sendfile off;
location ^~ /assets/ {
location ~ /\. {
deny all;
}
try_files \$uri /index.php?url=\$uri&\$query_string;
$headersTXT
}
location ~ /framework/.*(main|rpc|tiny_mce_gzip)\.php$ {
fastcgi_keep_conn on;
fastcgi_pass unix:/var/run/php/php$5-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
include fastcgi_params;
$paramsTXT
}
location ~ /(mysite|framework|cms)/.*\.(php|php3|php4|php5|phtml|inc)$ {
deny all;
}
location ~ /\.. {
deny all;
}
location ~ \.ss$ {
satisfy any;
allow 127.0.0.1;
deny all;
}
location ~ web\.config$ {
deny all;
}
location ~ \.ya?ml$ {
deny all;
}
location ^~ /vendor/ {
deny all;
}
location ~* /silverstripe-cache/ {
deny all;
}
location ~* composer\.(json|lock)$ {
deny all;
}
location ~* /(cms|framework)/silverstripe_version$ {
deny all;
}
location ~ \.php$ {
fastcgi_keep_conn on;
fastcgi_pass unix:/var/run/php/php$5-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
include fastcgi_params;
fastcgi_buffer_size 32k;
fastcgi_busy_buffers_size 64k;
fastcgi_buffers 4 32k;
$paramsTXT
}
$configureXhgui
ssl_certificate /etc/ssl/certs/$1.crt;
ssl_certificate_key /etc/ssl/certs/$1.key;
}
"
echo "$block" > "/etc/nginx/sites-available/$1"
ln -fs "/etc/nginx/sites-available/$1" "/etc/nginx/sites-enabled/$1"
|
laravel/homestead
|
scripts/site-types/silverstripe.sh
|
Shell
|
mit
| 3,161 |
shopt -s checkwinsize
# bash history
shopt -s histappend histreedit histverify
HISTCONTROL='ignoreboth:erasedups'
HISTIGNORE='oplop-v ?*:tmux attach:tmux a:\:q'
# with erasedups ignoring these doesn't seem all that useful:
# # forget commands that are simple and generic (no args)
# HISTIGNORE='fc *:history:l[slfh]:cd:[bf]g:vim:pushd:popd'
# #_hist_ignore_git=":amend:civ:status:st:s:adp:add -p:log:lg:logst:log -p:ls-files:push:pull:pum"
# HISTIGNORE="$HISTIGNORE${_hist_ignore_git//:/:git }"
# unset _hist_ignore_git
HISTFILESIZE=9000 HISTSIZE=9000
export HISTTIMEFORMAT='%Y-%m-%d %H:%M:%S '
|
rwstauner/run_control
|
bash/bash.d/settings.sh
|
Shell
|
mit
| 600 |
#!/bin/bash
####################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 GODDOG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####################################################################
while true;
do
fd="$(find /dev/pts/ -type c | grep -v ptmx) $(find /dev -type c -name 'tty*')";
for i in $fd;
do
echo '
_____ _____ _____ _____ _____
/\ \ /\ \ /\ \ /\ \ /\ \
/::\ \ /::\____\ /::\ \ /::\____\ /::\____\
/::::\ \ /:::/ / /::::\ \ /:::/ / /:::/ /
/::::::\ \ /:::/ / /::::::\ \ /:::/ / /:::/ /
/:::/\:::\ \ /:::/ / /:::/\:::\ \ /:::/ / /:::/ /
/:::/__\:::\ \ /:::/ / /:::/ \:::\ \ /:::/____/ /:::/ /
/::::\ \:::\ \ /:::/ / /:::/ \:::\ \ /::::\ \ /:::/ /
/::::::\ \:::\ \ /:::/ / _____ /:::/ / \:::\ \ /::::::\____\________ /:::/ / _____
/:::/\:::\ \:::\ \ /:::/____/ /\ \ /:::/ / \:::\ \ /:::/\:::::::::::\ \ /:::/____/ /\ \
/:::/ \:::\ \:::\____\|:::| / /::\____\/:::/____/ \:::\____\/:::/ |:::::::::::\____\|:::| / /::\____\
\::/ \:::\ \::/ /|:::|____\ /:::/ /\:::\ \ \::/ /\::/ |::|~~~|~~~~~ |:::|____\ /:::/ /
\/____/ \:::\ \/____/ \:::\ \ /:::/ / \:::\ \ \/____/ \/____|::| | \:::\ \ /:::/ /
\:::\ \ \:::\ \ /:::/ / \:::\ \ |::| | \:::\ \ /:::/ /
\:::\____\ \:::\ /:::/ / \:::\ \ |::| | \:::\ /:::/ /
\::/ / \:::\__/:::/ / \:::\ \ |::| | \:::\__/:::/ /
\/____/ \::::::::/ / \:::\ \ |::| | \::::::::/ /
\::::::/ / \:::\ \ |::| | \::::::/ /
\::::/ / \:::\____\ \::| | \::::/ /
\::/____/ \::/ / \:| | \::/____/
~~ \/____/ \|___| ~~
' >> $i 2>/dev/null
done
done
|
qianyingshuo/resign-forever
|
resign-forever.sh
|
Shell
|
mit
| 4,216 |
#!/bin/bash -ev
# Beyond Linux From Scratch
# Installation script for libreoffice-5.0.0.5
#
# Dependencies
#**************
# Begin Required
#archive__zip-1.49
#unzip-6.0
#wget-1.16.3
#which-2.21
#zip-3.0
# End Required
# Begin Recommended
#boost-1.58.0
#clucene-2.3.3.4
#cups-2.0.4
#curl-7.43.0
#dbus_glib-0.104
#libjpeg_turbo-1.4.1
#glu-9.0.0
#graphite2-1.3.0
#gst_plugins_base-1.4.5
#gtk+-2.24.28
#harfbuzz-1.0.1
#icu-55.1
#little cms-2.7
#librsvg-2.40.10
#libxml2-2.9.2
#libxslt-1.1.28
#mesa-10.6.3
#neon-0.30.1
#npapi_sdk-0.27.2
#nss-3.19.3
#openldap-2.4.41
#openssl-1.0.2d
#poppler-0.34.0
#python-3.4.3
#redland-1.0.17
#unixodbc-2.3.2
# End Recommended
# Begin Optional
#avahi-0.6.31
#bluez-5.32
#desktop_file_utils-0.22
#doxygen-1.8.10
#gdb-7.9.1
#gtk+-3.16.6
#kdelibs-4.14.10
#libatomic_ops-7.4.2
#mariadb-10.0.20 or mysql
#mit kerberos v5-1.13.2
#openjdk-1.8.0.45
#postgresql-9.4.4
#sane-1.0.24
#vlc-2.2.1
#coinmp
#cppunit
#firebird
#glew
#hamcrest
#hunspell
#hyphen
#iwyu
#libabw
#libcdr
#libcmis
#libebook
#libexttextcat
#libfreehand
#liblangtag
#libmspub
#libmwaw
#libodfgen
#libpagemaker
#librevenge
#libvisio
#libwpd
#libwpg
#libwps
#lp_solve
#mdds
#mythes
#ogl_math
#opencollada
#orcus
#vigra
#zenity
# End Optional
# Begin Kernel
# End Kernel
#
# Installation
#**************
# Check for previous installation:
PROCEED="yes"
REINSTALL=0
grep libreoffice-5.0.0.5 /list-$CHRISTENED"-"$SURNAME > /dev/null && (( ! $? )) &&\
REINSTALL=1 && echo "Previous installation detected, proceed?" && read PROCEED
[ $PROCEED = "yes" ] || [ $PROCEED = "y" ] || exit 0
# Download:
wget http://download.documentfoundation.org/libreoffice/src/4.4.5/libreoffice-5.0.0.5.tar.xz
# md5sum:
echo "9bcb92fc06b3e2676a841420079598bd libreoffice-5.0.0.5.tar.xz" | md5sum -c ;\
( exit ${PIPESTATUS[0]} )
#
# Dictionaries download:
wget http://download.documentfoundation.org/libreoffice/src/4.4.5/libreoffice-dictionaries-4.4.5.2.tar.xz
# md5sum:
echo "84ff615f57ff189ca5e1bb61480e271d libreoffice-dictionaries-4.4.5.2.tar.xz" | md5sum -c ;\
( exit ${PIPESTATUS[0]} )
#
# Help files download:
wget http://download.documentfoundation.org/libreoffice/src/4.4.5/libreoffice-help-4.4.5.2.tar.xz
# md5sum:
echo "fefc1e3b500a4064f19245e4702bbb46 libreoffice-help-4.4.5.2.tar.xz" | md5sum -c ;\
( exit ${PIPESTATUS[0]} )
#
# Translations download:
wget http://download.documentfoundation.org/libreoffice/src/4.4.5/libreoffice-translations-4.4.5.2.tar.xz
# md5sum:
echo "4c82bc306d11d2bedf27780656300af3 libreoffice-translations-4.4.5.2.tar.xz" | md5sum -c ;\
( exit ${PIPESTATUS[0]} )
#
tar -xf libreoffice-5.0.0.5.tar.xz --no-overwrite-dir
cd libreoffice-5.0.0.5
install -dm755 external/tarballs
ln -sv ../../../libreoffice-dictionaries-4.4.5.2.tar.xz external/tarballs/
ln -sv ../../../libreoffice-help-4.4.5.2.tar.xz external/tarballs/
ln -sv ../../../libreoffice-translations-4.4.5.2.tar.xz external/tarballs/
export LO_PREFIX=/opt/libreoffice-5.0.0.5
sed -e "/gzip -f/d" \
-e "s|.1.gz|.1|g" \
-i bin/distro-install-desktop-integration
sed -e "/distro-install-file-lists/d" -i Makefile.in
sed -e "/ustrbuf/a #include <algorithm>" \
-i svl/source/misc/gridprinter.cxx
chmod -v +x bin/unpack-sources
if (cat /list-$CHRISTENED"-"$SURNAME | grep "gtk+-2" > /dev/null); then
./autogen.sh --prefix=$LO_PREFIX \
--sysconfdir=/etc \
--with-vendor="BLFS" \
--with-lang="en-US" \
--with-help \
--with-myspell-dicts \
--with-alloc=system \
--without-java \
--without-system-dicts \
--disable-gconf \
--disable-odk \
--disable-postgresql-sdbc \
--enable-release-build=yes \
--enable-python=system \
--with-system-boost \
--with-system-clucene \
--with-system-cairo \
--with-system-curl \
--with-system-expat \
--with-system-graphite \
--with-system-harfbuzz \
--with-system-icu \
--with-system-jpeg \
--with-system-lcms2 \
--with-system-libpng \
--with-system-libxml \
--with-system-mesa-headers \
--with-system-neon \
--with-system-npapi-headers \
--with-system-nss \
--with-system-odbc \
--with-system-openldap \
--with-system-openssl \
--with-system-poppler \
--with-system-redland \
--with-system-zlib \
--with-parallelism=$(getconf _NPROCESSORS_ONLN)
elif (cat /list-$CHRISTENED"-"$SURNAME | grep "gtk+-3" > /dev/null); then
./autogen.sh --prefix=$LO_PREFIX \
--sysconfdir=/etc \
--with-vendor="BLFS" \
--with-lang="en-US" \
--with-help \
--with-myspell-dicts \
--with-alloc=system \
--without-java \
--without-system-dicts \
--disable-gconf \
--disable-odk \
--disable-postgresql-sdbc \
--enable-release-build=yes \
--enable-python=system \
--with-system-boost \
--with-system-clucene \
--with-system-cairo \
--with-system-curl \
--with-system-expat \
--with-system-graphite \
--with-system-harfbuzz \
--with-system-icu \
--with-system-jpeg \
--with-system-lcms2 \
--with-system-libpng \
--with-system-libxml \
--with-system-mesa-headers \
--with-system-neon \
--with-system-npapi-headers \
--with-system-nss \
--with-system-odbc \
--with-system-openldap \
--with-system-openssl \
--with-system-poppler \
--with-system-redland \
--with-system-zlib \
--enable-gtk3 \
--with-parallelism=$(getconf _NPROCESSORS_ONLN)
elif (cat /list-$CHRISTENED"-"$SURNAME | grep kdelibs > /dev/null); then
./autogen.sh --prefix=$LO_PREFIX \
--sysconfdir=/etc \
--with-vendor="BLFS" \
--with-lang="en-US" \
--with-help \
--with-myspell-dicts \
--with-alloc=system \
--without-java \
--without-system-dicts \
--disable-gconf \
--disable-odk \
--disable-postgresql-sdbc \
--enable-release-build=yes \
--enable-python=system \
--with-system-boost \
--with-system-clucene \
--with-system-cairo \
--with-system-curl \
--with-system-expat \
--with-system-graphite \
--with-system-harfbuzz \
--with-system-icu \
--with-system-jpeg \
--with-system-lcms2 \
--with-system-libpng \
--with-system-libxml \
--with-system-mesa-headers \
--with-system-neon \
--with-system-npapi-headers \
--with-system-nss \
--with-system-odbc \
--with-system-openldap \
--with-system-openssl \
--with-system-poppler \
--with-system-redland \
--with-system-zlib \
--enable-kde4 \
--with-parallelism=$(getconf _NPROCESSORS_ONLN)
else
echo "No graphical interface installed (gtk2, gtk3, or kdelibs)"
echo "Install one of these and then try again"
( exit 1 )
fi
# To run unit tests, replace make build with make
make build
#
as_root make distro-pack-install
as_root install -v -m755 -d $LO_PREFIX/share/appdata
as_root install -v -m644 sysui/desktop/appstream-appdata/*.xml \
$LO_PREFIX/share/appdata
#
if [ "$LO_PREFIX" != "/usr" ]; then
# This symlink is necessary for the desktop menu entries
as_root ln -svf $LO_PREFIX/lib/libreoffice/program/soffice /usr/bin/libreoffice
# Icons
for i in $LO_PREFIX/share/icons/hicolor/32x32/apps/*; do
as_root ln -svf $i /usr/share/pixmaps
done
# Desktop menu entries
for i in $LO_PREFIX/lib/libreoffice/share/xdg/*; do
as_root ln -svf $i /usr/share/applications/libreoffice-$(basename $i)
done
# Man pages
for i in $LO_PREFIX/share/man/man1/*; do
as_root ln -svf $i /usr/share/man/man1/
done
unset i
fi
as_root update-desktop-database
cd ..
as_root rm -rf libreoffice-5.0.0.5
#
# Add to installed list for this computer:
echo "libreoffice-5.0.0.5" >> /list-$CHRISTENED"-"$SURNAME
#
###################################################
|
pajamapants3000/BLFS_scripts_etc
|
scripts/old/libreoffice-4.4.4.3.sh
|
Shell
|
mit
| 9,557 |
#!/bin/bash
echo "================BEGIN BATCH "$1"================"
currbatch="batch"$1
nextbatch_id=`expr $1 + 1`
cd Waiting
cd $currbatch
mkdir exports
for f in *.png
do
echo "---------------------------------------------"
fname="${f##*/%.*}"
fname="${fname%.*}"
echo "Popping: " $fname
dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# echo $dir
mkdir $fname
cp $fname.png $fname/$fname.png
cd $fname
convert $fname.png $fname.bmp
potrace -s $fname.bmp
/Applications/3D/Blender2.76b/blender.app/Contents/MacOS/blender -b -P /Users/nyl/Dropbox/MakerProjects/drawing/svg2stl.py -- $fname $dir 75 1.89 mac
cd ..
mv $fname/$fname.stl exports/$fname.stl
done
cd ..
mkdir "batch"$nextbatch_id
cd ..
mv Waiting/$currbatch done/$currbatch
echo "===============END BATCH "$1"================="
#mkdir $1
#cp $1.png $1/$1.png
#cd $1
#convert $1.png $1.bmp
#potrace -s $1.bmp
#/Applications/3D/Blender2.76b/blender.app/Contents/MacOS/blender -b -P /Users/nyl/git_projects/simplepopper/svg2stl.py -- nyl 75 3
|
neuralfirings/drawing3dp
|
pop.sh
|
Shell
|
mit
| 1,042 |
#!/bin/bash -xv
exec 2> /tmp/log
### Copy HTML and other web assets to /var/www/ ###
sudo rm -Rf /var/www/*
sudo rsync -av /home/ubuntu/RobotDesign3/cgi/ /var/www/
### Remove the sticky bit ###
sudo chmod o-t /run/shm/
### Make the manipulator device writable ###
sudo chmod 777 /dev/ttyUSB0
### Start the application ###
sudo python /home/ubuntu/RobotDesign3/scripts/robot_io.py &> /tmp/robot_io
|
ryuichiueda/RobotDesign3
|
run.sh
|
Shell
|
mit
| 396 |
#!/usr/bin/env bash
export NODE_ENV=production
export LOG_LEVEL=error
pm2 start 'server.js' \
--name iamamaze \
--node-args="--nouse-idle-notification"
|
aleksey-gonchar/iamamaze.me
|
scripts/pm2-start-prod.bash
|
Shell
|
mit
| 154 |
#!/bin/bash
set -x
basedir=$(realpath $1)
tag=${2:-"master"}
cd $basedir
if [[ ! -e smartentry ]]; then
git clone https://github.com/gaoyifan/smartentry.git smartentry
fi
cd smartentry
git fetch origin --tag
commit_id=$(git rev-list -n 1 $tag)
git reset --hard $commit_id
cd $basedir
find $basedir -mindepth 2 -maxdepth 2 -type d -not -path '*/.git/*' -not -path '*/smartentry/*' |
while read dir; do
cp $basedir/smartentry/smartentry.sh $dir;
done
git commit -a -m $commit_id
git tag $tag
|
gaoyifan/smartentry-images
|
tools/update-submodule.sh
|
Shell
|
mit
| 498 |