| code (string, lengths 2–1.05M) | repo_name (string, lengths 5–110) | path (string, lengths 3–922) | language (string, 1 class) | license (string, 15 classes) | size (int64, 2–1.05M) |
|---|---|---|---|---|---|
binary_packages='deb http://download.noosfero.org/debian/wheezy-1.3 ./'
source_packages=$(echo "$binary_packages" | sed -e 's/^deb/deb-src/')
if ! grep -q "$binary_packages" /etc/apt/sources.list.d/noosfero.list; then
sudo tee /etc/apt/sources.list.d/noosfero.list <<EOF
$binary_packages
$source_packages
EOF
sudo apt-key add - <<EOF
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: GnuPG v1.4.9 (GNU/Linux)
mQGiBE1HCIURBADw6SnRbc1qCHdTV9wD0rxSMIWevzUX+bnDgvV455yudqtVFUhX
2QYvtlwclllbLWKzRdiM7GsBi+2DyWli4B17xl86A5RBQNdc1v1vWZG3QwURxd4E
46fC6mR/K09mJl7aD0yq1rFFLt8pq8aCn6geslqqwAkQHR1gXEL8ftqcpwCg7EkU
n/yivf3qPX03zWBDmdQZog0D/2z0JGdVqLZJHAKjndKHSCuQlP+8d8NF0d27kStN
hJjX8WcBLFKo9BeZUZnc0Kgq7+6p3wuvI1MzyqSEVEi2YxSB0zXU59HGrXtRQlQ2
YksppP2Hwe30/qrLgWJnNP4pxmWjv0F3PFSD4Om07hGxJ2ldWdBlfh2mAwOPtSXK
yYTZA/93+OvQSyneVEBNMH58cCB98tbnFz15VBdinNLRUpbWYMq/UGjDr5HCn54B
zh/SZOEVRVxgC8LMHsimNkBmpe2b6/5UdRa24CWb0iZV1mHEhNnaVp0PdMq2ljW0
T43e2pXeDuhfeFeELJyFdaQBRG7NIN+Udnu0tGZH3RonqVPM6LRETm9vc2Zlcm8g
QXJjaGl2ZSBTaWduaW5nIEtleSA8bm9vc2Zlcm8tZGV2QGxpc3Rhcy5zb2Z0d2Fy
ZWxpdnJlLm9yZz6IYAQTEQIAIAUCTUcIhQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4B
AheAAAoJELpeS0yfHm2nWpQAoNA5o6KDy8WUcXUHTfhmm5LYzPlQAJ91Ar/zaMdY
9g/5zr9/Quy8NIUpwLkEDQRNRwiFEBAAmtDGneyUvMU6HHA3sd9rgfa+EXHzGSwG
NvpREyAhxijnfPx4AUOCCOnh2Cf6jrwbxNNsWzgYVMdsO7yS/h1BHkO4t+RiPrYg
nEggQFU2SNff+TZPYci7aeXPTs9/K4IyKQ/+whYpO8R8LFGECz7b7F1vyPzCHGbs
Ki7mrNyinRFYVlpnmML7hBfrSFItSdefe59RL9Tu2kvr+vUvk60yvbdu93OrY5J7
ADAMN+NGPyPr/Y3Q9cXREmIRr5EV7U0IFBSDybMyvYODUc1gt25y+Byh3Yz7EyEZ
N+0Oh3A1CydWkjrWUwpuNe/Eni6B8awu4nYq9ow4VMMZLE3ruhMeMj5YX74qg3Fl
mOUODM5ffWbfiGaD2r4I+ZuH1VWvgPWWSLHHt8UI7eQLMxPWOoKVpKPPeme/27Rj
qXljFWZpuhsmVuGN32R79T5jCnZUKAaciwvYN9ucZ3RazdhynpX1izmSCWkZEaCb
+YNF3w/Wc9DqB9Ai78cVJzGqe7O11P4xtSI4T8oCx7oWlxHxlXUWD3Oa1b2yrXuL
hDmF8uyUFRSKSVtP8et2SbCozF/wK90DCy55FqUdraDahyAt8kFgM3CQR9mRh56p
EWorsDpd08puRFoPevEGe99+McZ29pR6f3RbrcFe2ws7lw2w8AJbHgelXRbeEie+
x/4Nfu/UATsAAwUP+gN2nSgLAS2Md3awg9mBI6VufflMbsuZJxjemJ9Phdyx5PR2
PvRvyZffaqZltTbBxPiOA1wAIpeWNVJehCpiZgXih93HMTrucBvYyLlbxr7Or7ex
t1/K7TZo5Si+yJ6zNCNXewPimZCV1oUWE8P2uy8iyMUhgpFc7q7xeQCOkvqYphlA
bUT8BcD6Coo4s98gOfgetch0fgCdiCYTNbT0+7jOw8sTx7DmlQHKSmQ6NXOZypI7
lk3OwZIGB6t+Os2Q8uLYxoWzK6fqc8CSSgQPpL4wd4w9/etwzav3/SiZJN3NE0UL
RoayneyD0bC83w2HAEcYb8qDsF85pPkjXSXZdlXulTZC89/4yq8h6hJODOQ7hKTx
TvEE5i3LmAYj+uTbuoauYBJMiU2oXrqfCGR+tmxz5V7QSwLdy0d95w0F/Rj1sesO
SfBRGyxqSqQsO9KDMJdmi/FyjiPBVKE3i9YFWsePLnHs3JNCRehDt3xpap3YrjBW
MAMb36KpZ9M6Cj2nRjB4pfVNno0hmsQ3+8So2vBW/UAbHUW/izQPRFVp+HXVxDf6
xjIi9gyocstFCkKrD7NFL/7u6fWginUNXIjYAdqbqRIihzfW7Et2QiPL4tnQrQey
4P8Y7+gThn0CWeJw4leCueYr/yYUJ7lelYCd9q2uphC/2KinUxBSInKjQ7+8iEkE
GBECAAkFAk1HCIUCGwwACgkQul5LTJ8ebae2qgCeOMvYOOVDVtchTRhD56VlYKOi
FPQAoNmiMgP6zGF9rgOEWMEiFEryayrz
=70DR
-----END PGP PUBLIC KEY BLOCK-----
EOF
fi
if grep -qrl wheezy /etc/apt/sources.list* && ! grep -qrl wheezy-backports /etc/apt/sources.list*; then
sudo tee /etc/apt/sources.list.d/backports.list <<EOF
deb http://httpredir.debian.org/debian wheezy-backports main
EOF
fi
if test -f tmp/debian/Release.gpg; then
echo "deb file://$(pwd)/tmp/debian/ ./" | sudo tee /etc/apt/sources.list.d/local.list
sudo apt-key add tmp/debian/signing-key.asc
else
sudo rm -f /etc/apt/sources.list.d/local.list
fi
run sudo apt-get update
run sudo apt-get -qy dist-upgrade
run sudo apt-get -y install dctrl-tools
# need these from backports
run sudo apt-get -y install -t wheezy-backports ruby-rspec unicorn
# needed to run noosfero
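# grep-dctrl extracts the Build-Depends/Depends/Recommends fields for the noosfero package from
# debian/control; sed drops comment lines and version constraints and splits the list one entry
# per line; the final grep filters out packages that are handled or configured separately.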
packages=$(grep-dctrl -n -s Build-Depends,Depends,Recommends -S -X noosfero debian/control | sed -e '/^\s*#/d; s/([^)]*)//g; s/,\s*/\n/g' | grep -v 'memcached\|debconf\|dbconfig-common\|misc:Depends\|adduser\|mail-transport-agent')
run sudo apt-get -y install $packages
sudo apt-get -y install iceweasel || sudo apt-get -y install firefox
run rm -f Gemfile.lock
run bundle --local
| AlessandroCaetano/noosfero | script/install-dependencies/debian-wheezy.sh | Shell | agpl-3.0 | 3,834 |
#!/bin/sh
test_description='pdsh internal testcases
Run pdsh internal testsuite'
. ${srcdir:-.}/test-lib.sh
test_expect_success 'working xstrerrorcat' '
pdsh -T0
'
test_expect_success 'working pipecmd' '
pdsh -T1
'
test_done
| grondo/pdsh | tests/t0002-internal.sh | Shell | gpl-2.0 | 231 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
python -m grpc_tools.protoc -I../../proto --python_out=python --grpc_python_out=python ../../proto/ipython.proto
| herval/zeppelin | python/src/main/resources/grpc/generate_rpc.sh | Shell | apache-2.0 | 915 |
#!/usr/bin/env bash
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e
. ./test-lib.sh
setup_initgit
setup_gitgit
(
set -e
cd git-git
git checkout -q --track -b work origin
echo "some work done on a branch" >> test
git add test; git commit -q -m "branch work"
echo "some other work done on a branch" >> test
git add test; git commit -q -m "branch work"
test_expect_success "git-cl upload wants a server" \
"$GIT_CL upload --no-oauth2 2>&1 | grep -q 'You must configure'"
git config rietveld.server localhost:10000
test_expect_success "git-cl status has no issue" \
"$GIT_CL_STATUS | grep -q 'no issue'"
# Prevent the editor from coming up when you upload.
export EDITOR=$(which true)
test_expect_success "upload succeeds (needs a server running on localhost)" \
"$GIT_CL upload --no-oauth2 -m test master | \
grep -q 'Issue created'"
test_expect_success "git-cl status now knows the issue" \
"$GIT_CL_STATUS | grep -q 'Issue number'"
# Check to see if the description contains the local commit messages.
# Should contain 'branch work' x 2.
test_expect_success "git-cl status has the right description for the log" \
"[ \$($GIT_CL_STATUS --field desc | egrep -c '^branch work$') -eq 2 ]"
test_expect_success "git-cl status has the right subject from message" \
"$GIT_CL_STATUS --field desc | \
egrep --byte-offset '^test$' | grep -q '^0:'"
test_expect_success "git-cl push ok" \
"$GIT_CL push -f --no-oauth2"
git checkout -q master > /dev/null 2>&1
git pull -q > /dev/null 2>&1
test_expect_success "committed code has proper description" \
"[ \$(git show | egrep -c '^branch work$') -eq 2 ]"
test_expect_success "issue no longer has a branch" \
"$GIT_CL_STATUS | grep -q 'work : None'"
cd $GITREPO_PATH
test_expect_success "upstream repo has our commit" \
"[ \$(git log master 2>/dev/null | egrep -c '^branch work$') -eq 2 ]"
)
SUCCESS=$?
cleanup
if [ $SUCCESS == 0 ]; then
echo PASS
fi
| Teamxrtc/webrtc-streaming-node | third_party/depot_tools/tests/push-from-logs.sh | Shell | mit | 2,161 |
# Load up the PRAD_BROAD Meta Data File
./importCancerStudy.pl $PORTAL_DATA_HOME/public-override/prad_mskcc/prad_mskcc.txt
# Imports All Case Lists
./importCaseList.pl $PORTAL_DATA_HOME/public-override/prad_mskcc/case_lists
# Imports Clinical Data
./importClinicalData.pl $PORTAL_DATA_HOME/public-override/prad_mskcc/prad_mskcc_clinical.txt prad_mskcc
# Imports Mutation Data
./importProfileData.pl --data $PORTAL_DATA_HOME/public-override/prad_mskcc/data_mutations_extended.txt --meta $PORTAL_DATA_HOME/public-override/prad_mskcc/meta_mutations_extended.txt --dbmsAction clobber
# Imports Copy Number Data
./importProfileData.pl --data $PORTAL_DATA_HOME/public-override/prad_mskcc/data_CNA.txt --meta $PORTAL_DATA_HOME/public-override/prad_mskcc/meta_CNA.txt --dbmsAction clobber
# Copy number segment
#./importCopyNumberSegmentData.pl $PORTAL_DATA_HOME/public-override/prad_mskcc/prad_mskcc_scna_hg18.seg prad_mskcc
# Imports MRNA Expression Data
./importProfileData.pl --data $PORTAL_DATA_HOME/public-override/prad_mskcc/data_mRNA_ZbyNorm.txt --meta $PORTAL_DATA_HOME/public-override/prad_mskcc/meta_mRNA_ZbyNorm.txt --dbmsAction clobber
# MutSig
./importMutSig.pl $PORTAL_DATA_HOME/public-override/prad_mskcc/data_mutsig.txt $PORTAL_DATA_HOME/public-override/prad_mskcc/meta_mutsig.txt
| j-hudecek/cbioportal | core/src/main/scripts/all-prostate-mskcc.sh | Shell | agpl-3.0 | 1,296 |
#!/bin/sh
# arm_branch_out_of_range.sh -- test ARM/THUMB/THUMB branch instructions whose
# targets are just out of the branch range limits.
# Copyright (C) 2010-2015 Free Software Foundation, Inc.
# Written by Doug Kwan <[email protected]>
# This file is part of gold.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
# MA 02110-1301, USA.
# This file goes with the assembler source files arm_bl_out_of_range.s,
# thumb_bl_out_of_range.s and thumb_bl_out_of_range_local.s that are assembled
# and linked to check that branches whose targets are just out of the branch
# range limits are handled correctly.
check()
{
file=$1
pattern=$2
found=`grep "$pattern" $file`
if test -z "$found"; then
echo "pattern \"$pattern\" not found in file $file."
exit 1
fi
}
# This is a bit crude. Also, there are tabs in the grep patterns.
check arm_bl_out_of_range.stdout \
" 4000004: eb00003d bl 4000100 <.*>"
check arm_bl_out_of_range.stdout \
" 4000008: eb00003e bl 4000108 <.*>"
check arm_bl_out_of_range.stdout \
" 4000100: e51ff004 ldr pc, \[pc, #-4\]"
check arm_bl_out_of_range.stdout \
" 4000104: 02000008 "
check arm_bl_out_of_range.stdout \
" 4000108: e51ff004 ldr pc, \[pc, #-4\]"
check arm_bl_out_of_range.stdout \
" 400010c: 06000010 "
check thumb_bl_out_of_range.stdout \
" 800004: f000 e87c blx 800100 <.*>"
check thumb_bl_out_of_range.stdout \
" 800008: f000 e87e blx 800108 <.*>"
check thumb_bl_out_of_range.stdout \
" 800100: e51ff004 ldr pc, \[pc, #-4\]"
check thumb_bl_out_of_range.stdout \
" 800104: 00400007 "
check thumb_bl_out_of_range.stdout \
" 800108: e51ff004 ldr pc, \[pc, #-4\]"
check thumb_bl_out_of_range.stdout \
" 80010c: 00c0000d "
check thumb_blx_out_of_range.stdout \
" 800004: f000 e87c blx 800100 <.*>"
check thumb_blx_out_of_range.stdout \
" 80000a: f000 e87e blx 800108 <.*>"
check thumb_blx_out_of_range.stdout \
" 800100: e51ff004 ldr pc, \[pc, #-4\]"
check thumb_blx_out_of_range.stdout \
" 800104: 00400004 "
check thumb_blx_out_of_range.stdout \
" 800108: e51ff004 ldr pc, \[pc, #-4\]"
check thumb_blx_out_of_range.stdout \
" 80010c: 00c0000c "
check thumb_bl_out_of_range_local.stdout \
" 800004: f000 e87c blx 800100 <.*>"
check thumb_bl_out_of_range_local.stdout \
" 800008: f000 e87e blx 800108 <.*>"
check thumb_bl_out_of_range_local.stdout \
" 800100: e51ff004 ldr pc, \[pc, #-4\]"
check thumb_bl_out_of_range_local.stdout \
" 800104: 00400007 "
check thumb_bl_out_of_range_local.stdout \
" 800108: e51ff004 ldr pc, \[pc, #-4\]"
check thumb_bl_out_of_range_local.stdout \
" 80010c: 00c0000d "
check thumb2_bl_out_of_range.stdout \
" 2000004: f000 e87c blx 2000100 <.*>"
check thumb2_bl_out_of_range.stdout \
" 2000008: f000 e87e blx 2000108 <.*>"
check thumb2_bl_out_of_range.stdout \
" 2000100: e51ff004 ldr pc, \[pc, #-4\]"
check thumb2_bl_out_of_range.stdout \
" 2000104: 01000007 "
check thumb2_bl_out_of_range.stdout \
" 2000108: e51ff004 ldr pc, \[pc, #-4\]"
check thumb2_bl_out_of_range.stdout \
" 200010c: 0300000d "
check thumb2_blx_out_of_range.stdout \
" 2000004: f000 e87c blx 2000100 <.*>"
check thumb2_blx_out_of_range.stdout \
" 200000a: f000 e87e blx 2000108 <.*>"
check thumb2_blx_out_of_range.stdout \
" 2000100: e51ff004 ldr pc, \[pc, #-4\]"
check thumb2_blx_out_of_range.stdout \
" 2000104: 01000004 "
check thumb2_blx_out_of_range.stdout \
" 2000108: e51ff004 ldr pc, \[pc, #-4\]"
check thumb2_blx_out_of_range.stdout \
" 200010c: 0300000c "
exit 0
| selmentdev/selment-toolchain | source/binutils-latest/gold/testsuite/arm_branch_out_of_range.sh | Shell | gpl-3.0 | 4,175 |
#!/bin/bash
#########################################################
# Script Name: configure-ansible.sh
# Author: Gonzalo Ruiz
# Version: 0.1
# Date Created: 1st March 2015
# Last Modified: 4th April, 17:26 GMT
# Last Modified By: Gonzalo Ruiz
# Description:
# This script automates the installation of this VM as an ansible VM. Specifically it:
# installs ansible on all the nodes
# configures ssh keys
# Parameters :
# 1 - i: IP Pattern
# 2 - n: Number of nodes
# 3 - r: Configure RAID
# 4 - f: filesystem : ext4 or xfs
# Note :
# This script has only been tested on CentOS 6.5 and Ubuntu 12.04 LTS
#########################################################
#---BEGIN VARIABLES---
IP_ADDRESS_SPACE=''
NUMBER_OF_NODES=''
NODE_LIST_IPS=()
CONFIGURE_RAID=''
FILE_SYSTEM=''
USER_NAME=''
USER_PASSWORD=''
TEMPLATE_ROLE='couchbase'
START_IP_INDEX=0
function usage()
{
echo "INFO:"
echo "Usage: configure-ansible.sh [-i IP_ADDRESS_SPACE ] [-n NUMBER_OF_NODES ] [-r CONFIGURE_RAID ] [-f FILE_SYSTEM] "
echo "The -i (ipAddressSpace) parameter specifies the starting IP space for the VMs. For instance, if you specify 10.0.2.2 and 3 nodes, the script will use 10.0.2.20, 10.0.2.21 and 10.0.2.22 for the VMs. Please note that Azure reserves the first 4 IPs, so you will have to specify an IP space in which IP x.x.x0 is available"
echo "The -n (numberOfNodes) parameter specifies the number of VMs"
echo "The -r (configureRAID) parameter specifies whether you want to create a RAID with all the available data disks. Allowed values: true or false"
echo "The -f (fileSystem) parameter specifies the file system you want to use. Allowed values: ext4 or xfs"
}
function log()
{
# If you want to enable this logging, uncomment the line below and add your account id
#curl -X POST -H "content-type:text/plain" --data-binary "${HOSTNAME} - $1" https://logs-01.loggly.com/inputs/<key>/tag/es-extension,${HOSTNAME}
echo "$1"
}
#---PARSE AND VALIDATE PARAMETERS---
if [ $# -ne 8 ]; then
log "ERROR:Wrong number of arguments specified. Parameters received $#. Terminating the script."
usage
exit 1
fi
while getopts :i:n:r:f: optname; do
log "INFO:Option $optname set with value ${OPTARG}"
case $optname in
i) # IP address space
IP_ADDRESS_SPACE=${OPTARG}
;;
n) # Number of VMS
NUMBER_OF_NODES=${OPTARG}
IDX=${START_IP_INDEX}
while [ "${IDX}" -lt "${NUMBER_OF_NODES}" ];
do
NODE_LIST_IPS[$IDX]="${IP_ADDRESS_SPACE}${IDX}"
IDX=$((${IDX} + 1))
done
;;
r) # Configure RAID
CONFIGURE_RAID=${OPTARG}
if [[ "${CONFIGURE_RAID}" != "true" && "${CONFIGURE_RAID}" != "false" ]] ; then
log "ERROR:Configure RAID (-r) value ${CONFIGURE_RAID} not allowed"
usage
exit 1
fi
;;
f) # File system : ext4 or xfs
FILE_SYSTEM=${OPTARG}
if [[ "${FILE_SYSTEM}" != "ext4" && "${FILE_SYSTEM}" != "xfs" ]] ; then
log "ERROR:File system (-f) ${FILE_SYSTEM} not allowed"
usage
exit 1
fi
;;
\?) #Invalid option - show help
log "ERROR:Option -${BOLD}$OPTARG${NORM} not allowed."
usage
exit 1
;;
esac
done
function check_OS()
{
OS=`uname`
KERNEL=`uname -r`
MACH=`uname -m`
if [ -f /etc/redhat-release ] ; then
DistroBasedOn='RedHat'
DIST=`cat /etc/redhat-release |sed s/\ release.*//`
PSUEDONAME=`cat /etc/redhat-release | sed s/.*\(// | sed s/\)//`
REV=`cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//`
elif [ -f /etc/SuSE-release ] ; then
DistroBasedOn='SuSe'
PSUEDONAME=`cat /etc/SuSE-release | tr "\n" ' '| sed s/VERSION.*//`
REV=`cat /etc/SuSE-release | tr "\n" ' ' | sed s/.*=\ //`
elif [ -f /etc/debian_version ] ; then
DistroBasedOn='Debian'
if [ -f /etc/lsb-release ] ; then
DIST=`cat /etc/lsb-release | grep '^DISTRIB_ID' | awk -F= '{ print $2 }'`
PSUEDONAME=`cat /etc/lsb-release | grep '^DISTRIB_CODENAME' | awk -F= '{ print $2 }'`
REV=`cat /etc/lsb-release | grep '^DISTRIB_RELEASE' | awk -F= '{ print $2 }'`
fi
fi
OS=$OS
DistroBasedOn=$DistroBasedOn
readonly OS
readonly DIST
readonly DistroBasedOn
readonly PSUEDONAME
readonly REV
readonly KERNEL
readonly MACH
log "INFO: Detected OS : ${OS} Distribution: ${DIST}-${DistroBasedOn}-${PSUEDONAME} Revision: ${REV} Kernel: ${KERNEL}-${MACH}"
}
function install_ansible_ubuntu()
{
apt-get --yes --force-yes install software-properties-common
apt-add-repository ppa:ansible/ansible
apt-get --yes --force-yes update
apt-get --yes --force-yes install ansible
# install sshpass
apt-get --yes --force-yes install sshpass
# install Git
apt-get --yes --force-yes install git
}
function install_ansible_centos()
{
# install EPEL Packages - sshdpass
wget http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
rpm -ivh epel-release-6-8.noarch.rpm
# install ansible
yum -y install ansible
yum install -y libselinux-python
# needed to copy the keys to all the vms
yum -y install sshpass
# install Git
yum -y install git
}
function configure_ssh()
{
# copy ssh private key
mkdir -p ~/.ssh
mv id_rsa ~/.ssh
# set permissions
chmod 700 ~/.ssh
chmod 600 ~/.ssh/id_rsa
# copy root ssh key
cat id_rsa.pub >> ~/.ssh/authorized_keys
rm id_rsa.pub
# set permissions
chmod 600 ~/.ssh/authorized_keys
if [[ "${DIST}" == "Ubuntu" ]]; then
#restart sshd service - Ubuntu
service ssh restart
elif [[ "${DIST}" == "CentOS" ]] ; then
# configure SELinux
restorecon -Rv ~/.ssh
#restart sshd service - CentOS
service sshd restart
fi
}
function configure_ansible()
{
# Copy ansible hosts file
ANSIBLE_HOST_FILE=/etc/ansible/hosts
ANSIBLE_CONFIG_FILE=/etc/ansible/ansible.cfg
mv ${ANSIBLE_HOST_FILE} ${ANSIBLE_HOST_FILE}.backup
mv ${ANSIBLE_CONFIG_FILE} ${ANSIBLE_CONFIG_FILE}.backup
# Accept ssh keys by default
printf "[defaults]\nhost_key_checking = False\n\n" >> "${ANSIBLE_CONFIG_FILE}"
# Shorten the ControlPath to avoid errors with long host names , long user names or deeply nested home directories
echo $'[ssh_connection]\ncontrol_path = ~/.ssh/ansible-%%h-%%r' >> "${ANSIBLE_CONFIG_FILE}"
# Generate a new ansible host file
# printf "[master]\n${IP_ADDRESS_SPACE}.${NUMBER_OF_NODES}\n\n" >> "${ANSIBLE_HOST_FILE}"
printf "[${TEMPLATE_ROLE}]\n${IP_ADDRESS_SPACE}[0:$(($NUMBER_OF_NODES - 1))]" >> "${ANSIBLE_HOST_FILE}"
# Validate ansible configuration
ansible ${TEMPLATE_ROLE} -m ping -v
}
function configure_storage()
{
log "INFO: Configuring Storage "
log "WARNING: This process is not incremental, don't use it if you don't want to lose your existing storage configuration"
# Run ansible template to configure Storage : Create RAID and Configure Filesystem
ansible-playbook InitStorage_RAID.yml --extra-vars "target=${TEMPLATE_ROLE} file_system=${FILE_SYSTEM}"
}
InitializeVMs()
{
check_OS
configure_ssh
if [[ "${DIST}" == "Ubuntu" ]];
then
log "INFO:Installing Ansible for Ubuntu"
install_ansible_ubuntu
elif [[ "${DIST}" == "CentOS" ]] ; then
log "INFO:Installing Ansible for CentOS"
install_ansible_centos
else
log "ERROR:Unsupported OS ${DIST}"
exit 2
fi
configure_ansible
configure_storage
}
InitializeVMs
| cr0550ver/azure-quickstart-templates | ansible-advancedlinux/configure_ansible.sh | Shell | mit | 8,004 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
KUBE_REMOTE_RUNTIME_ROOT="${KUBE_ROOT}/pkg/kubelet/api/v1alpha1/runtime"
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
function cleanup {
rm -rf ${KUBE_REMOTE_RUNTIME_ROOT}/_tmp/
}
trap cleanup EXIT
mkdir -p ${KUBE_REMOTE_RUNTIME_ROOT}/_tmp
cp ${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go ${KUBE_REMOTE_RUNTIME_ROOT}/_tmp/
ret=0
KUBE_VERBOSE=3 "${KUBE_ROOT}/hack/update-generated-runtime.sh"
diff -I "gzipped FileDescriptorProto" -I "0x" -Naupr ${KUBE_REMOTE_RUNTIME_ROOT}/_tmp/api.pb.go ${KUBE_REMOTE_RUNTIME_ROOT}/api.pb.go || ret=$?
if [[ $ret -eq 0 ]]; then
echo "Generated container runtime api is up to date."
cp ${KUBE_REMOTE_RUNTIME_ROOT}/_tmp/api.pb.go ${KUBE_REMOTE_RUNTIME_ROOT}/
else
echo "Generated container runtime api is out of date. Please run hack/update-generated-runtime.sh"
exit 1
fi
| kubeup/kube-aliyun | vendor/k8s.io/kubernetes/hack/verify-generated-runtime.sh | Shell | apache-2.0 | 1,526 |
#!/bin/bash
# This script is meant to be called by the "install" step defined in
# .travis.yml. See http://docs.travis-ci.com/ for more details.
# The behavior of the script is controlled by environment variables defined
# in the .travis.yml in the top level folder of the project.
# License: 3-clause BSD
set -e
# Fix the compilers to avoid having the Python 3.4 build
# look up g++44 unexpectedly.
export CC=gcc
export CXX=g++
if [[ "$DISTRIB" == "conda" ]]; then
# Deactivate the travis-provided virtual environment and setup a
# conda-based environment instead
deactivate
# Use the miniconda installer for faster download / install of conda
# itself
wget http://repo.continuum.io/miniconda/Miniconda-3.6.0-Linux-x86_64.sh \
-O miniconda.sh
chmod +x miniconda.sh && ./miniconda.sh -b
export PATH=/home/travis/miniconda/bin:$PATH
conda update --yes conda
# Configure the conda environment and put it in the path using the
# provided versions
conda create -n testenv --yes python=$PYTHON_VERSION pip nose \
numpy=$NUMPY_VERSION scipy=$SCIPY_VERSION
source activate testenv
if [[ "$INSTALL_MKL" == "true" ]]; then
# Make sure that MKL is used
conda install --yes mkl
else
# Make sure that MKL is not used
conda remove --yes --features mkl || echo "MKL not installed"
fi
elif [[ "$DISTRIB" == "ubuntu" ]]; then
# At the time of writing numpy 1.9.1 is included in the travis
# virtualenv but we want to used numpy installed through apt-get
# install.
deactivate
# Create a new virtualenv using system site packages for numpy and scipy
virtualenv --system-site-packages testvenv
source testvenv/bin/activate
pip install nose
fi
if [[ "$COVERAGE" == "true" ]]; then
pip install coverage coveralls
fi
# Build scikit-learn in the install.sh script to collapse the verbose
# build output in the travis output when it succeeds.
python --version
python -c "import numpy; print('numpy %s' % numpy.__version__)"
python -c "import scipy; print('scipy %s' % scipy.__version__)"
python setup.py build_ext --inplace
| trankmichael/scikit-learn | continuous_integration/install.sh | Shell | bsd-3-clause | 2,174 |
#!/usr/bin/env bash
set -e # halt script on error
bundle exec jekyll post "$*"
| prestodb-rocks/site | bin/new_post.sh | Shell | mit | 80 |
git clone https://github.com/naibaf7/caffe.git
cd caffe
git checkout master
# Dependencies
sudo apt-get install -y libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libboost-all-dev libhdf5-serial-dev
sudo apt-get install -y protobuf-compiler gfortran libjpeg62 libfreeimage-dev libatlas-base-dev git python-dev python-pip
sudo apt-get install -y libgoogle-glog-dev libbz2-dev libxml2-dev libxslt-dev libffi-dev libssl-dev libgflags-dev liblmdb-dev python-yaml
sudo apt-get install -y libviennacl-dev opencl-headers libopenblas-base libopenblas-dev
easy_install pillow #conda python
# Compile Caffe
cp ../Makefile.config Makefile.config
cores=`grep -c ^processor /proc/cpuinfo`
make all -j$cores VIENNACL_DIR=../ViennaCL-1.7.0/
make test -j$cores VIENNACL_DIR=../ViennaCL-1.7.0/
make runtest -j$cores VIENNACL_DIR=../ViennaCL-1.7.0/
| soumith/convnet-benchmarks | greentea/install.sh | Shell | mit | 848 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2536-1
#
# Security announcement date: 2015-03-18 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:22 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fix on version:
# - libxfont1:1:1.4.4-1ubuntu0.3
#
# Last versions recommended by security team:
# - libxfont1:1:1.4.4-1ubuntu0.3
#
# CVE List:
# - CVE-2015-1802
# - CVE-2015-1803
# - CVE-2015-1804
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libxfont1=1:1.4.4-1ubuntu0.3 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_12.04_LTS/i386/2015/USN-2536-1.sh | Shell | mit | 669 |
#!/bin/sh
set -x
version=`git describe HEAD --tags --always --abbrev=0`
revision=`git describe HEAD --tags --always`
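# 'version' is the nearest tag alone (--abbrev=0) and names the zip files; 'revision' keeps the
# commit distance and hash suffix, and is what gets baked into the binaries via -ldflags below.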
rm "$GOPATH/bin/hashbox/"*.zip
rm "$GOPATH/bin/hashbox/"*
BuildAndZip () {
rm "$GOPATH/bin/hashbox/$2" >/dev/null 2>&1
go build -ldflags "-X main.Version=$revision" -o "$GOPATH/bin/hashbox/$2"
rm "$GOPATH/bin/hashbox/$1-$version.zip" >/dev/null 2>&1
zip -jD "$GOPATH/bin/hashbox/$1-$version.zip" "$GOPATH/bin/hashbox/$2"
}
cd server
go fmt
GOOS=freebsd GOARCH=amd64 BuildAndZip "hashbox-freebsd-amd64" "hashbox-freebsd-amd64"
GOOS=linux GOARCH=arm GOARM=7 BuildAndZip "hashbox-linux-armv7l" "hashbox-linux-armv7l"
GOOS=darwin GOARCH=amd64 BuildAndZip "hashbox-mac-amd64" "hashbox-mac"
GOOS=windows GOARCH=amd64 BuildAndZip "hashbox-windows-amd64" "hashbox-windows.exe"
cd ../hashback
go fmt
GOOS=linux GOARCH=amd64 BuildAndZip "hashback-linux-amd64" "hashback"
GOOS=darwin GOARCH=amd64 BuildAndZip "hashback-mac-amd64" "hashback"
GOOS=windows GOARCH=amd64 BuildAndZip "hashback-win-amd64" "hashback.exe"
GOOS=windows GOARCH=386 BuildAndZip "hashback-win-x86" "hashback-x86.exe"
exit 0
| fredli74/hashbox | build_all.sh | Shell | mit | 1,149 |
#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
# -e: immediately exit if any command has a non-zero exit status
# -o: prevents errors in a pipeline from being masked
# IFS new value is less likely to cause confusing bugs when looping arrays or arguments (e.g. $@)
usage() { echo "Usage: $0 -i <subscriptionId> -g <resourceGroupName> -n <deploymentName> -l <resourceGroupLocation> -p <parametersFile>" 1>&2; exit 1; }
declare subscriptionId=""
declare resourceGroupName=""
declare deploymentName=""
declare resourceGroupLocation=""
declare parametersFile=""
# Initialize parameters specified from command line
while getopts ":i:g:n:l:p:" arg; do
case "${arg}" in
i)
subscriptionId=${OPTARG}
;;
g)
resourceGroupName=${OPTARG}
;;
n)
deploymentName=${OPTARG}
;;
l)
resourceGroupLocation=${OPTARG}
;;
p)
parametersFile=${OPTARG}
;;
esac
done
shift $((OPTIND-1))
#Prompt for parameters if some required parameters are missing
if [[ -z "$subscriptionId" ]]; then
echo "Subscription Id:"
read subscriptionId
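# ${subscriptionId:?} aborts the script with an error if the variable is still empty after the prompt.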
[[ "${subscriptionId:?}" ]]
fi
if [[ -z "$resourceGroupName" ]]; then
echo "ResourceGroupName:"
read resourceGroupName
[[ "${resourceGroupName:?}" ]]
fi
if [[ -z "$deploymentName" ]]; then
echo "DeploymentName:"
read deploymentName
fi
if [[ -z "$resourceGroupLocation" ]]; then
echo "Enter a location below to create a new resource group else skip this"
echo "ResourceGroupLocation:"
read resourceGroupLocation
fi
#templateFile Path - template file to be used
templateFilePath="template.json"
if [ ! -f "$templateFilePath" ]; then
echo "$templateFilePath not found"
exit 1
fi
if [ ! -f "$parametersFile" ]; then
echo "$parametersFile not found"
exit 1
fi
if [ -z "$subscriptionId" ] || [ -z "$resourceGroupName" ] || [ -z "$deploymentName" ]; then
echo "Either one of subscriptionId, resourceGroupName, deploymentName is empty"
usage
fi
#login to azure using your credentials
# note: with 'set -e' active, a failing 'az account show' would exit the script before '$?' could be tested, so chain the fallback directly
az account show 1> /dev/null || az login
#set the default subscription id
az account set --subscription $subscriptionId
set +e
#Check for existing RG
rgExists="$(az group exists --name $resourceGroupName)"
if [ "$rgExists" = false ]; then
echo "Resource group with name" $resourceGroupName "could not be found. Creating new resource group.."
set -e
(
set -x
az group create --name $resourceGroupName --location $resourceGroupLocation 1> /dev/null
)
else
echo "Using existing resource group..."
fi
#Start deployment
echo "Starting deployment..."
(
set -x
az group deployment create --name $deploymentName --resource-group $resourceGroupName --template-file $templateFilePath --parameters $parametersFile --debug
)
if [ $? == 0 ];
then
echo "Template has been successfully deployed"
fi
| pjbgf/container_webapp_netcore | arm/deploy.sh | Shell | mit | 2,775 |
#!/bin/bash
pushd ~/mdp/mdp
rm nohup-gh.out
rm nohup-gh.err
git reset --hard
git pull
nohup ./github.py > nohup-gh.out 2> nohup-gh.err < /dev/null &
popd
| n43jl/mdp | bash/scripts/run-gh.sh | Shell | mit | 171 |
#!/bin/bash
# Source function library.
. /usr/lib64/nagios/plugins/functions
#
# Check on 0.60 only
# This job will take some time, so DON'T run job too often.
# Maybe 30min per job will be OK.
#
svnopt=" --username=ci --password=sp12345678 --no-auth-cache --non-interactive "
glueip="192.168.1.153"
espdump="/tmp/esp_file_list.txt"
# This action will take few seconds.
svn list -R $svnopt http://$glueip/svn/glue/trunk/bundle/xuanran001/src/main/resources/initial-content/apps/ > $espdump
while read -r line;
do
url="http://127.0.0.1:9999/apps/${line}"
# Skip directory entries (URLs ending in /); they are not esp files
echo $url | grep "/$" &> /dev/null
if [ $? -eq 0 ]; then
continue
fi
http_code="`curl --max-time 5 -I -s -o /dev/null -w '%{http_code}' $url`"
if [ "X$http_code" != "X200" ]; then
echo "$http_code : $url"
exit $RET_CR
fi
done < $espdump
echo "OK"
exit $RET_OK
| xuanran001/xuanran001-infrastructure | nagios_nrpe/usr/lib64/nagios/plugins/sling_datastore.sh | Shell | mit | 867 |
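# Negative test: the network policy should deny dummy-logger -> api:443, so a successful nc
# connection fails the test (exit 1), while a timeout (traffic blocked) lets the test pass.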
kubectl -n net-policy-test exec -ti dummy-logger -- nc -w10 -q1 api 443 && exit 1
exit 0
| max-lobur/k8s-weave-demo | tests/test_deny_logger-api.sh | Shell | mit | 89 |
#!/bin/sh
##Author: Lewis HONG and ZHU O. Yuan
##Script name: 01_BAsE-Seq_alignment.sh
##BAsE-Seq pipeline script part 01 for data processing from fastq to mapped bams, vcfs, and QC plots
#run command from folder containing fastq files as follows
#01_BAsE-Seq_alignment.sh parameter_file.txt
#redirect STDOUT to log file if needed
#see example_parameter_file.txt for parameter file format
##Required software: see full list in README.md
##Calls scripts: (please refer to individual scripts for details)
# Obtain_barcode.pl
# Obtain_pfreads.pl
# filter_sam.pl
# Split_sam.pl
# Sam_to_bam.pl
# Rmdup.pl
# Bam_to_vcf_lofreq.pl
# Bam_to_mpileup.pl
# PairsPerBarcode.pl
# CovPerBarcode.pl
# Script.r
#reads in all necessary parameters for script
echo "Loading parameters from parameter_file.txt"
. ./$1
#extract read pairs with identifiable barcodes
echo "Extracting fastq read pairs with barcodes"
perl $SCRIPTPATH/Obtain_barcode.pl $DATAPATH/$READ1 $DATAPATH/$READ2 $DATAPATH/$PREFX.barcoded_1.fastq $DATAPATH/$PREFX.barcoded_2.fastq $RNLEN $BARCD
#trims away adaptor sequences and headcrop removes barcode sequences, leaves only reads >20bp length
#this segment may be replaced with any equivalent trimmer that can perform the same functions
echo "Trimming adaptor sequences and low quality reads"
fastx_clipper -Q 33 -v -a 'ATGTCGAT' -i $DATAPATH/$PREFX.barcoded_1.fastq | fastx_clipper -Q 33 -v -a 'ATGCTGCCTGCAGG' -i - | java -jar -Djava.io.tmpdir=./tmp -Xmx2g trimmomatic-0.30.jar SE -threads 12 -phred33 /dev/stdin $DATAPATH/$PREFX.barcoded_1_clipped.fastq MINLEN:20 &
fastx_clipper -Q 33 -v -a 'CTGTCTCTTATAC' -i $DATAPATH/$PREFX.barcoded_2.fastq | java -jar -Djava.io.tmpdir=./tmp -Xmx2g trimmomatic-0.30.jar SE -threads 12 -phred33 /dev/stdin $DATAPATH/$PREFX.barcoded_2_clipped.fastq HEADCROP:28 MINLEN:15 &
# wait for both background trimming jobs to finish before pairing the surviving reads
wait
#retrieve surviving usable read pairs after trimming
echo "Retrieving read pairs post trimming"
perl $SCRIPTPATH/Obtain_pfreads.pl $DATAPATH/$PREFX.barcoded_1_clipped.fastq $DATAPATH/$PREFX.barcoded_2_clipped.fastq $DATAPATH/$PREFX.barcoded_1_clipped_pf.fastq $DATAPATH/$PREFX.barcoded_2_clipped_pf.fastq
#align fastq read pairs to reference genotype
echo "BWA-mem Alignment"
bwa mem $REFPATH $DATAPATH/$PREFX.barcoded_1_clipped_pf.fastq $DATAPATH/$PREFX.barcoded_2_clipped_pf.fastq > $DATAPATH/$PREFX.sam
java -Djava.io.tmpdir=tmp -Xmx8g -jar SortSam.jar INPUT=$DATAPATH/$PREFX.sam OUTPUT=$DATAPATH/$PREFX.sorted.bam SORT_ORDER=coordinate
#optional local realignment steps that were not necessary for HBV
#echo "Add groups"
#java -jar AddOrReplaceReadGroups.jar INPUT=$PREFX.sorted.dedup.bam OUTPUT=$PREFX.sorted.dedup.grouped.bam RGID= RGLB= RGPL= RGPU= RGSM=
#echo "Local realignment"
#java -jar GenomeAnalysisTK.jar -T RealignerTargetCreator -R $REFPATH -I $PREFX.sorted.dedup.bam -o $PREFX.sorted.dedup.grouped.intervals
#mark read duplicates
echo "Mark duplicates"
java -jar MarkDuplicates.jar INPUT=$DATAPATH/$PREFX.sorted.bam OUTPUT=$DATAPATH/$PREFX.sorted.dedup.bam METRICS_FILE=$DATAPATH/${PREFX}_metrics.txt
#Filter sam file to keep only concordant alignments:
echo "Retrieve concordant reads"
perl $SCRIPTPATH/filter_sam.pl $DATAPATH/$PREFX.sam > $DATAPATH/$PREFX.filtered.sam
#Keep uniquely mapped reads (can use -bp to output bam file)
echo "Retrieve unique reads"
samtools view -q 1 -S $DATAPATH/$PREFX.filtered.sam > $DATAPATH/$PREFX.filtered.unique.sam
#splits sam file into individual barcodes
echo "Processing Individual genomes"
perl $SCRIPTPATH/Split_sam.pl $DATAPATH/$PREFX.barcoded_2_clipped_pf.fastq $DATAPATH/$PREFX.barcoded_2.fastq $DATAPATH/$PREFX.filtered.unique.sam $READSPERBRCD $REFNAME $REFLEN
#Convert sam to bam, then sort:
perl $SCRIPTPATH/Sam_to_bam.pl $DATAPATH
#Remove read duplicates:
echo "Calling SNPs"
perl $SCRIPTPATH/Rmdup.pl $DATAPATH
#Create vcf files:
perl $SCRIPTPATH/Bam_to_vcf_lofreq.pl $REFPATH $DATAPATH
#Create coverage pileups
echo "Coverage Pileups"
perl $SCRIPTPATH/Bam_to_mpileup.pl $REFPATH $DATAPATH
#Summarizing reads per barcode
echo "Pairs per sam"
perl $SCRIPTPATH/PairsPerBarcode.pl $DATAPATH
#Summarizing covered bases per barcode
echo "Reads over 4x"
perl $SCRIPTPATH/CovPerBarcode.pl $DATAPATH $COVREQ
#Plotting QC figures
echo "Plot QC"
cp $SCRIPTPATH/Script.r $DATAPATH/
./Script.r
echo "01_BAsE-Seq_alignment.sh run complete. Proceed to manual QC."
| OliviaZhu26/Single_virion_seq | 01_BAsE-Seq_alignment.sh | Shell | mit | 4,317 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
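# Truncate (or create) the file that accumulates the list of resources to copy.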
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
realpath() {
DIRECTORY="$(cd "${1%/*}" && pwd)"
FILENAME="${1##*/}"
echo "$DIRECTORY/$FILENAME"
}
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE=$(realpath "${PODS_ROOT}/$1")
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "../ARWebServerActivity/ARWebServerActivity.bundle"
install_resource "GCDWebServer/GCDWebUploader/GCDWebUploader.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "../ARWebServerActivity/ARWebServerActivity.bundle"
install_resource "GCDWebServer/GCDWebUploader/GCDWebUploader.bundle"
fi
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "`realpath $PODS_ROOT`"* ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
| alexruperez/ARWebServerActivity | Pods/Target Support Files/Pods/Pods-resources.sh | Shell | mit | 5,000 |
#!/bin/bash
isMute=$( /usr/bin/osascript -e 'output muted of (get volume settings)' )
if [ "$isMute" == "true" ]; then
echo "muted"
else
curVolume=$(osascript -e 'output volume of (get volume settings)')
echo $curVolume
fi
| gregdaynes/dotfiles | .config/Ubersicht/widgets/Statusbar-master/widgets/scripts/getVolumeStat.sh | Shell | mit | 229 |
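# Build voice-command against pocketsphinx: pkg-config supplies the compiler and linker flags,
# and MODELDIR bakes in the acoustic-model directory so the binary can find its models at runtime.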
gcc -v -o voice-command voice-command.c -DMODELDIR=\"`pkg-config --variable=modeldir pocketsphinx`\" `pkg-config --cflags --libs pocketsphinx sphinxbase` -I/usr/local/include
| baluubas/voice-command | src/linux/voice-command/build.sh | Shell | mit | 175 |
#!/bin/bash
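# In these patterns '(| )' matches an optional space, so e.g. 'New(| )York' matches both 'NewYork' and 'New York'.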
KEYWORDS_NEWYORK="New(| )York"
KEYWORDS_NYC="New(| )York(| )City|\bNYC|New(| )York(|,)(| )New(| )York|Manhattan|Brooklyn|Queens(|,)(| )New(| )York|Chrysler(| )Building|Bank(| )of(| )America(| )Tower|Big(| )Apple"
KEYWORDS_ALBANY="Albany"
KEYWORDS_ALBANY_EXCLUDE="Albany(|,)(| )(GA|Georgia)"
KEYWORDS_EMPIRESTATE="Empire(| )State(| )Building"
KEYWORDS_NYC_EXCLUDE="$KEYWORDS_EMPIRESTATE"
KEYWORDS_NYC_ALL="$KEYWORDS_NYC|$KEYWORDS_NYC_EXCLUDE"
KEYWORDS_NEWYORK_EXCLUDE="$KEYWORDS_NYC_ALL|$KEYWORDS_ALBANY"
KEYWORDS_NEWYORK_ALL="$KEYWORDS_NEWYORK|$KEYWORDS_ALBANY|$KEYWORDS_NYC_ALL"
if [ "$1" == "" ];
then
debug_start "New York"
NEWYORK=$(egrep -i "$KEYWORDS_NEWYORK" "$NEWPAGES" | egrep -iv "$KEYWORDS_NEWYORK_EXCLUDE")
NYC=$(egrep -i "$KEYWORDS_NYC" "$NEWPAGES" | egrep -iv "$KEYWORDS_NYC_EXCLUDE")
ALBANY=$(egrep -i "$KEYWORDS_ALBANY" "$NEWPAGES" | egrep -iv "$KEYWORDS_ALBANY_EXCLUDE")
EMPIRESTATE=$(egrep -i "$KEYWORDS_EMPIRESTATE" "$NEWPAGES")
categorize "NEWYORK" "New York"
categorize "NYC" "New York City"
categorize "ALBANY" "Albany, New York"
categorize "EMPIRESTATE" "Empire State Building"
debug_end "New York"
fi
| MW-autocat-script/MW-autocat-script | catscripts/Government/Countries/United_States/US_states/New_York/NewYork.sh | Shell | mit | 1,163 |
#!/usr/bin/env bash
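# Mirror HDD1A onto HDD1B; --delete removes files from the destination that no longer exist in the source.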
rsync -av --delete /mnt/HDD1A/ /mnt/HDD1B
| tiborsimon/dotfiles | scripts/images/backup.bash | Shell | mit | 63 |
#!/bin/bash
set -e
SERVICE_ENV=$1
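# ${AWS_REGION+x} expands to 'x' only when AWS_REGION is set, so -z detects an unset variable
# (a set-but-empty value still passes this check).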
if [ -z ${AWS_REGION+x} ]; then
echo "Please set a region";
exit 1;
fi
if [ -z "$SERVICE_ENV" ]; then
echo "Please set service environment ./deploy.sh staging";
exit 1;
fi
echo "Deploying to stage $SERVICE_ENV"
# use the serverless version installed in the project
./node_modules/.bin/sls deploy --stage $SERVICE_ENV --region $AWS_REGION --verbose
| serverless-examples/todo-serverless | aws/deploy.sh | Shell | mit | 397 |
#!/bin/bash
main() {
echo '* uploading'
local path="$LOCAL_TARGET"
if [ -z "$path" ]; then
echo 'Missing LOCAL_TARGET env'
exit 1
fi
acd_cli upload --overwrite $path/* /
}
main "$@"
| peterdemartini/butterfinger-docker | acd-cli/upload.sh | Shell | mit | 202 |
PATH="$PATH:$PWD/cabal-dev/bin/"
VERSION=$(awk '/^version:/{print $2}' knob.cabal)
CABAL_DEV=$(which cabal-dev)
XZ=$(which xz)
require_cabal_dev()
{
if [ -z "$CABAL_DEV" ]; then
echo "Can't find 'cabal-dev' executable; make sure it exists on your "'$PATH'
echo "Cowardly refusing to fuck with the global package database"
exit 1
fi
}
clean_dev_install()
{
require_cabal_dev
rm -rf dist
$CABAL_DEV install || exit 1
}
| fujimura/knob | scripts/common.bash | Shell | mit | 434 |
#!/usr/bin/env bash
cd ../
git add . && git commit -m 'deploy' && git push origin master
# stop & remove all docker containers
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)
# delete all images
docker rmi $(docker images -q)
# remove container
# remove old src and upload new src
rm -rf src/github.com/Zhanat87
rm bin/go
go get -u github.com/Zhanat87/go
cd src/github.com/Zhanat87/go/ && go install && cd ../../../../
# create new docker image and push to docker hub
docker build -t zhanat87/golang .
docker push zhanat87/golang
# list of all docker images on host machine
docker images
echo "deploy2 success"
# simple docker golang with drone.io deploy
| Zhanat87/go | bash/deploy2.sh | Shell | mit | 670 |
#!/bin/sh
OS=`dirname $0`"/../"
. $OS/scripts/common.sh
java_cli net.violet.platform.xmpp.JabberComponentManagerPacket "$@"
| sebastienhouzet/nabaztag-source-code | server/OS/scripts/jabberComponentManagerPacket.sh | Shell | mit | 125 |
#!/usr/bin/env bash
OUTPUT="/tmp/zookindialog.txt"
dialog --backtitle "Zookin TUI" --nook --nocancel --separate-widget " " \
--begin 4 4 --inputbox "Plaintext: " 8 100 \
--and-widget \
--begin 8 8 --inputbox "Multiplier (Coprime with 26): " 8 100 \
--and-widget \
--begin 12 12 --inputbox "Shift Value (0-25): " 8 100 2>$OUTPUT
CODE=$?
MENU=$(cat $OUTPUT)
IFS=" "
SWITCH=1
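# dialog wrote the three answers to $OUTPUT separated by spaces (--separate-widget " ");
# walk the words to recover A (plaintext), B (multiplier), C (shift value).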
for i in $MENU; do
case $SWITCH in
1)
A=$i
SWITCH=2
;;
2)
B=$i
SWITCH=3
;;
3)
C=$i
SWITCH=4
;;
esac
done
unset IFS
CIPHER=$(zookin-affine -e -c "$A" -1 "$B" -2 "$C")
dialog --backtitle "Zookin TUI" --msgbox "Encrypted text: ${CIPHER^^}" 8 100
| bufutda/zookin | lib/dialog/affine/encrypt.sh | Shell | mit | 778 |
#!/bin/sh
# Helper script for pull-tester.
#Param 1: path to ronpaulcoin srcroot
#Param ...: arguments for build-test.sh
if [ $# -lt 1 ]; then
echo "usage: $0 [ronpaulcoin srcroot] build-test arguments..."
exit 1
fi
cd $1
shift
./autogen.sh
./configure
./qa/pull-tester/build-tests.sh "$@"
| corefork/ronpaulcoincore | qa/pull-tester/pull-tester.sh | Shell | mit | 288 |
#!/bin/bash
echo "Generating list of names, object types, and exposure times"
for NAME in $(ls C*.fits); do
#echo $NAME
echo "${NAME%.*}"
dfits $NAME | grep OBJECT
dfits $NAME | grep EXPTIME
#fitsort $NAME OBJECT,EXPTIME
# could maybe auto-generate lists, but going to just copy and paste from the generated list today.
done
| jason-neal/equanimous-octo-tribble | octotribble/extraction/list_all_creator.sh | Shell | mit | 358 |
#!/bin/bash
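# Create the 'squash' database if it does not exist, then load each dataset; the SQL loaders
# read their CSV input from /tmp, hence the copy/remove around each psql call.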
cmd="psql template1 --tuples-only --command \"select count(*) from pg_database where datname = 'squash';\""
db_exists=`eval $cmd`
if [ $db_exists -eq 0 ] ; then
cmd="createdb squash;"
eval $cmd
fi
psql squash -f schema/namespace.sql
cp csv/teams.csv /tmp/teams.csv
psql squash -f loaders/teams.sql
rm /tmp/teams.csv
cp csv/matches.csv /tmp/matches.csv
psql squash -f loaders/matches.sql
rm /tmp/matches.csv
cp csv/match_outcomes.csv /tmp/match_outcomes.csv
psql squash -f loaders/match_outcomes.sql
rm /tmp/match_outcomes.csv
cp csv/match_games.csv /tmp/match_games.csv
psql squash -f loaders/match_games.sql
rm /tmp/match_games.csv
psql squash -f loaders/match_sets.sql
| octonion/squash | csa/load.sh | Shell | mit | 699 |
# Requires sddsplot
# Argument one is the root filename of the simulation.
# Argument two and argument three are additional arguments for sddsplot
sddsplot $2 -legend -column=s,betax -column=s,betay -graph=line,vary \
$1.twi -column=s,Profile -graph=line,type=7 -overlay=xmode=norm,yfact=0.1,yoffset=5 $1.mag -title='' -topline=' ' $3
| ChrisProkop/NIU-beamtools | Elegant_BeamlineBeta.sh | Shell | mit | 561 |
#!/bin/bash
echo "no-op"
| dvcs/gitignore | .github/scripts/build-parent.sh | Shell | mit | 26 |
#!/usr/bin/env bash
# PAGER / LESS
function configure() {
# Colored man pages: http://linuxtidbits.wordpress.com/2009/03/23/less-colors-for-man-pages/
# Less Colors for Man Pages
export LESS_TERMCAP_mb=$'\E[01;31m' # begin blinking
export LESS_TERMCAP_md=$'\E[01;38;5;74m' # begin bold
export LESS_TERMCAP_me=$'\E[0m' # end mode
export LESS_TERMCAP_se=$'\E[0m' # end standout-mode
export LESS_TERMCAP_so=$'\E[38;5;016m\E[48;5;220m' # begin standout-mode - info box
export LESS_TERMCAP_ue=$'\E[0m' # end underline
export LESS_TERMCAP_us=$'\E[04;38;5;146m' # begin underline
}
configure
| edouard-lopez/dotfiles | includes/posix/pager.bash | Shell | mit | 662 |
#!/bin/bash -x
echo "Hello bash"
exit 0
| teerasarn/project1 | app/config/deploy/user-scripts/test.sh | Shell | mit | 42 |
#!/usr/bin/env bash
. ./script/env.sh
. ./script/functions.sh
VERSION=3.0
SHA256=947b7b0485215f72e14bb8936c847abb583253c597f58234650922270259049c
file_path=$(f_install_file http://sourceforge.net/projects/kpcli/files/kpcli-$VERSION.pl $SHA256)
if [[ ! -e $file_path ]]; then
echo 'Remote file is invalid'
exit 1
fi
chmod +x $file_path
f_link_command $file_path kpcli.pl
PERL_PACKAGES="Crypt::Rijndael
Term::ReadKey
Sort::Naturally
File::KeePass
Term::ShellUI
Term::ReadLine::Gnu
Clipboard
Capture::Tiny
Data::Password
Clone
XML::Parser
Term::ReadLine
Term::ReadKey
Sub::Install"
for p in $PERL_PACKAGES; do
perl -MCPAN -e "notest install '$p'"
done
PATCH=$(pwd)/Xclip.patch
cd ~/perl5/lib/perl5/Clipboard && { patch < $PATCH; cd -; }
| lfont/dotfiles | kpcli/bin/install-kpcli.sh | Shell | mit | 943 |
if [ "$#" -ne 1 ];then
echo "wrong input format"
echo "example: ./plot.sh map-reduce.dax"
exit 1
fi
input=$1
prefix=`echo ${input}| cut -d '.' -f 1`
path=../src/Pegasus/dax2dot
if [ ! -x "${path}" ];then
echo "pegasus is not properly installed"
echo "need to run: export PATH=${PATH}:/path/to/dax2dot "
echo "you also need to install graphviz to use dot"
exit 1
fi
${path} -f ${input} -o ${prefix}.dot
dot -Tpng ${prefix}.dot -o ${prefix}.png
| applicationskeleton/Skeleton | util/plot.sh | Shell | mit | 477 |
# shellcheck shell=bash
function __dotfiles_exports_rbenv() {
if ! is-command brew; then
return
fi
RBENV_ROOT="$(get-brew-prefix-path rbenv)"
if [ -d "$RBENV_ROOT" ]; then
# Contains Ruby versions and shims.
export RBENV_ROOT
# Initialize rbenv.
eval "$(rbenv init -)"
# Set global version if one isn't already set.
if [ -z "$(rbenv global)" ]; then
rbenv global 2.7.2
fi
fi
}
__dotfiles_exports_rbenv
| wesm87/dotfiles | exports/rbenv.sh | Shell | mit | 456 |
## Command history configuration
if [ -z $HISTFILE ]; then
HISTFILE=$HOME/.zsh_history
fi
HISTSIZE=10000
SAVEHIST=10000
setopt append_history
setopt extended_history
setopt hist_expire_dups_first
setopt hist_ignore_dups # ignore duplication command history list
setopt hist_ignore_space
setopt hist_verify
setopt inc_append_history
# setopt share_history # share command history data
| gvt/oh-my-zsh | lib/history.zsh | Shell | mit | 389 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2791-1
#
# Security announcement date: 2015-11-04 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:52 UTC
#
# Operating System: Ubuntu 15.10
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libnss3:2:3.19.2.1-0ubuntu0.15.10.1
#
# Last versions recommended by security team:
# - libnss3:2:3.23-0ubuntu0.15.10.1
#
# CVE List:
# - CVE-2015-7181
# - CVE-2015-7182
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libnss3=2:3.23-0ubuntu0.15.10.1 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_15.10/x86_64/2015/USN-2791-1.sh | Shell | mit | 660 |
#
# Handling of the bearer token used for authentication.
#
rms_config add-settings token_auto_mode token_auto_create_func token_timeout
rms_token() {
case "$1" in
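# Note on flow control below: ';;&' continues testing the remaining patterns after a match,
# while ';&' falls through into the next body without testing its pattern.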
get) echo -n "$_RMS_token" ;;
clear|set)
_rms_subshell_warning
unset _RMS_token _RMS_token_time
;;&
clear) ;;
set)
if [ "$3" != "" ]; then
if [[ "$3" =~ ^[0-9]+$ ]]; then
_RMS_token_time="$3"
elif [ "$3" = "now" ]; then
_RMS_token_time="$(date +%s)"
else
echo "Error: Invalid token time given: $3" >&2
return 1
fi
fi
_RMS_token="$2"
;;
put)
if ! [[ "$2" =~ ^[a-zA-Z_][a-zA-Z0-9_]*$ ]]; then
echo "Error: Cannot put to an invalid variable name." >&2
return 1
fi
eval "$2"'=$_RMS_token'
;;
create)
local mode
rms_output put-mode mode
${_RMS_config[token_auto_create_func]:-rms_lidp_login}
local ok=$?
rms_output mode "$mode"
return $ok
;;
check)
local mode
rms_output put-mode mode
rms_check_token --no-error
local ok=$?
rms_output mode "$mode"
return $ok
;;
timed-out)
# For this command, 0 means timed out or error, 1 means not.
if [ "$_RMS_token_time" = "" ]; then
return 0
fi
if ! [[ "$_RMS_token_time" =~ ^[0-9]+$ ]]; then
echo "Error: Invalid token time: $_RMS_token_time" >&2
return 0
fi
local time="$2"
if [ "$time" = "" ]; then
time="$(date +%s)"
fi
if ! [[ "$time" =~ ^[0-9]+$ ]]; then
echo "Error: Invalid time: $time" >&2
return 0
fi
local timeout="${_RMS_config[token_timeout]:-30}"
if ! [[ "$timeout" =~ ^[0-9]+$ ]]; then
echo "Error: Invalid timeout: $timeout" >&2
return 0
fi
if [ $_RMS_token_time -le $(($time - $timeout)) ]; then
return 0
fi
return 1
;;
auto-create)
_rms_subshell_warning
local mode="${_RMS_config[token_auto_mode]:-check}"
case "$mode" in
never)
return 0
;;
always)
rms_token create
return $?
;;
empty)
if [ "$_RMS_token" = "" ]; then
rms_token create
return $?
fi
return 0
;;
check|timeout)
local time="$(date +%s)"
if [ "$_RMS_token" != "" ] && ! rms_token timed-out "$time"
then
return 0
fi
;;&
check)
if [ "$_RMS_token" != "" ] && rms_token check
then
_RMS_token_time="$time"
return 0
fi
;&
check|timeout)
rms_token create
local ok=$?
[ $ok -eq 0 ] && _RMS_token_time="$time"
return $ok
;;
*)
echo "Error: Unknown token auto mode: $mode" >&2
return 1
;;
esac
;;
*)
echo "Error: Unknown rms_token subcommand: $1" >&2
return 1
;;
esac
return 0
}
| edorfaus/dm-rms-bash-client | core/token.sh | Shell | mit | 2,703 |
#!/bin/sh
SUBAPP=$1
APP="/usr/bin/ansible"
if test -e "/usr/bin/ansible-$SUBAPP";then
APP="/usr/bin/ansible-$SUBAPP"
shift
fi
USERID=$(stat -c '%u' /work)
GROUPID=$(stat -c '%g' /work)
if [ x$ANSIBLE_VAULT_PASSWORD != 'x' ]; then
echo $ANSIBLE_VAULT_PASSWORD >/vault
chmod 644 /vault
export ANSIBLE_VAULT_PASSWORD_FILE=/vault
fi
# check if work-directory is owned by root or another user.
# if another user, we create one with the correct UID/GID and run
# the command as this user. if the command creates a file it will belong
# to the same user as the owner of the mounted /work
if [ $USERID = '0' ]; then
$APP "$@"
else
USERNAME=ansible
GROUPNAME=ansible
addgroup -S -g $GROUPID $GROUPNAME
echo "$USERNAME:x:$USERID:$USERID::/home/$USERNAME:" >> /etc/passwd
echo "$USERNAME:!:$(($(date +%s) / 60 / 60 / 24)):0:99999:7:::" >> /etc/shadow
echo "$USERNAME:x:$USERID:" >> /etc/group
mkdir /home/$USERNAME
chown ansible:ansible /home/$USERNAME
if [ ! -z "$SSH_AUTH_SOCK" ]
then
chown ansible:ansible $SSH_AUTH_SOCK
fi
export HOME=/home/$USERNAME
sudo -u ansible -E $APP "$@"
fi
| ulrichSchreiner/ansible-docker | entry.sh | Shell | mit | 1,126 |
find . -name "*.java" > sources.txt
javac @sources.txt
rm sources.txt
| Nirespire/SimpleBittorrent | submission/compile.sh | Shell | mit | 67 |
# vagrant setup
vagrant box add trusty https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box
vagrant up
#vagrant ssh
| nnsnodnb/vagrant-trusty-rstudio-server | setup.sh | Shell | mit | 165 |
#!/usr/bin/env bash
cd $(dirname $(readlink -f $0))
source ../../utils/libdeploy.bash
# linking config directory
link_package \
./config/config ${HOME}/.config/lemon-modules
execute_with_privilege rm -rf /etc/acpi
execute_with_privilege cp -r ./config/acpi /etc/acpi
# linking systemd unit files
link_package \
./config/lemon-modules.service ${HOME}/.config/systemd/user/lemon-modules.service \
./config/lemon-modules-scheduler.service ${HOME}/.config/systemd/user/lemon-modules-scheduler.service \
./config/lemon-modules-scheduler.timer ${HOME}/.config/systemd/user/lemon-modules-scheduler.timer
# linking command scripts
link_package \
./scripts/server.bash ${HOME}/.local/bin/lemon-modules-server \
./scripts/update.bash ${HOME}/.local/bin/lemon-modules-update \
./scripts/init.bash ${HOME}/.local/bin/lemon-modules-init \
./scripts/click-handler.bash ${HOME}/.local/bin/lemon-modules-click-handler \
./scripts/scheduler.bash ${HOME}/.local/bin/lemon-modules-scheduler
| tiborsimon/dotfiles | configs/lemon-modules/02_deploy_config.bash | Shell | mit | 1,034 |
#!/bin/sh
curl -X GET http://localhost:8080/v3/resources/Package --data-urlencode [email protected]
| clarenceb/puppetdb_spike | queries/RESOURCES_package_wildcard_search.sh | Shell | mit | 117 |
#!/usr/bin/env bash
# for "true full screen" call the script with "fullscreen" as the first argument
TRUE_FULL_SCREEN="$1"
TMUX="$(which tmux)"
start_terminal_and_run_tmux() {
x-terminal-emulator -e ./linux_shell_activator.sh $TMUX
}
resize_window_to_full_screen() {
echo "Resize is up to terminal emulator being used." > /dev/null 2>&1
}
resize_to_true_full_screen() {
echo "Resize is up to terminal emulator being used." > /dev/null 2>&1
}
main() {
start_terminal_and_run_tmux
if [ "$TRUE_FULL_SCREEN" == "fullscreen" ]; then
resize_to_true_full_screen
else
resize_window_to_full_screen
fi
}
main
| insanepaul/tmux-continuum | scripts/handle_tmux_automatic_start/linux_terminal_start_tmux.sh | Shell | mit | 615 |
#sed -i -e 's/\r$//' your_script.sh
sudo yum install -y wget httpd php gcc glibc glibc-common gd gd-devel make net-snmp unzip
cd /tmp
sudo wget https://github.com/scorpio7x/nagios/raw/master/nagios-4.2.0.tar.gzip
sudo wget https://github.com/scorpio7x/nagios/raw/master/nagios-plugins-2.1.2.tar.gzip
#wget https://github.com/scorpio7x/nagios/blob/master/nagios-4.3.4.tar.gzip?raw=true -O nagios-4.3.4.tar.gzip
#wget https://github.com/scorpio7x/nagios/blob/master/nagios-3.5.1.tar.gzip?raw=true -O nagios-3.5.1.tar.gzip
#wget https://github.com/scorpio7x/nagios/blob/master/nagios-plugins-2.2.1.tar.gzip?raw=true -O nagios-plugins-2.2.1.tar.gzip
sudo useradd nagios
sudo groupadd nagcmd
sudo usermod -a -G nagcmd nagios
tar -xvf nagios-4.2.0.tar.gzip
tar -xvf nagios-plugins-2.1.2.tar.gzip
cd nagios-4.2.0
sudo ./configure --with-command-group=nagcmd
sudo make all
sudo make install
sudo make install-init
sudo make install-commandmode
sudo make install-config
sudo make install-webconf
#vi /usr/local/nagios/etc/objects/contacts.cfg
clear
echo "THIET LAP MAT KHAU CHO ADMIN NAGIOS"
sudo htpasswd -s -c /usr/local/nagios/etc/htpasswd.users nagiosadmin
sudo systemctl start httpd.service
cd ..
cd nagios-plugins-2.1.2
sudo ./configure --with-nagios-user=nagios --with-nagios-group=nagios
sudo make
sudo make install
sudo /usr/local/nagios/bin/nagios -v /usr/local/nagios/etc/nagios.cfg
sudo chkconfig --add nagios
sudo chkconfig --level 35 nagios on
sudo chkconfig --add httpd
sudo chkconfig --level 35 httpd on
sudo systemctl enable nagios
sudo systemctl enable httpd
sudo systemctl start nagios.service
sudo rm -frv /tmp/nagios*
clear
echo "CAI DAT NAGIOS CORE DA HOAN TAT"
echo
echo "DIA CHI DANG NHAP: http://$(ip addr | grep 'state UP' -A2 | tail -n1 | awk '{print $2}' | cut -f1 -d'/')/nagios"
echo
echo "ACCOUNT LOGIN: nagiosadmin"
echo
echo "PASSWORD BAN DA THIET LAP TRUOC DO"
echo
|
scorpio7x/nagios
|
Install-Nagios-CentOS7.sh
|
Shell
|
mit
| 1,932 |
#!/bin/bash
#
# Download cloudfront gzipped logs and unpack them to a directory
# Edit this
S3_CONFIG="/root/.s3cfg"
BUCKET="s3://bucket.name"
LOCAL_GZIP="/mnt/cloudfront_gzip"
GZIP_PATH="/mnt/gzip"
S3_GZIP_LOG="/mnt/s3_gzip.log"
LOCAL_GZIP_LOG="/mnt/local_gzip.log"
COMPARE_LOG="/mnt/compare.log"
GZIP_DOWNLOAD_LOG="/mnt/download_gzip.log"
GZIP_DOWNLOADED="/mnt/downloaded.log"
DONE="/mnt/done.log"
# Remove old logs
rm -f $S3_GZIP_LOG $LOCAL_GZIP_LOG $COMPARE_LOG $GZIP_DOWNLOAD_LOG $GZIP_DOWNLOADED $DONE
# List what's on localhost and what's on s3
/usr/bin/s3cmd -c $S3_CONFIG ls $BUCKET | cut -d/ -f 5 | sort >> $S3_GZIP_LOG
find $LOCAL_GZIP -type f -name "*.gz" | cut -d/ -f 4 | sort >> $LOCAL_GZIP_LOG
# Compare the lists and ignore what's on local
comm --nocheck-order -23 $S3_GZIP_LOG $LOCAL_GZIP_LOG >> $COMPARE_LOG
# Add the s3 path to beginning of each line
perl -pe "print '$BUCKET'" $COMPARE_LOG >> $GZIP_DOWNLOAD_LOG
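# Each line of $GZIP_DOWNLOAD_LOG is now the bucket prefix plus a missing
# object name; these are the URLs fed to the parallel "s3cmd get" calls below.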
# Download what's missing from s3 in parallel
# -j10 = 10 concurrent threads
# -N1 = 1 argument from stdin, assigned to {1}
# {1} = that argument (The URL from the logsfile)
# Get parallel from http://www.gnu.org/software/parallel/
cat $GZIP_DOWNLOAD_LOG | parallel -j10 -N1 --progress --no-notice /usr/bin/s3cmd -c $S3_CONFIG --no-progress get {1} $GZIP_PATH >> $GZIP_DOWNLOADED
# Make a list of downloaded files
cat $GZIP_DOWNLOADED | awk '{print $5}' | sed -e s/"'"/""/g >> $DONE
# Uncomment to remove downloaded files from S3. Adding an expiry rule is preferred instead.
# cat $GZIP_DOWNLOAD_LOG | parallel -j10 -N1 --no-notice /usr/bin/s3cmd -c $S3_CONFIG --no-progress del {1}
|
hmain/logstash
|
cloudfront_download.sh
|
Shell
|
mit
| 1,623 |
mongod --fork --logpath /var/log/mongodb.log
while true
do
Rscript runscenariossics.R
sleep 1
done
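# Note: the loop above has no exit condition, so the shutdown below is only
# reached if the loop is removed or the script is modified to break out of it.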
mongod --shutdown
|
jcliberatol/irtppscripts
|
runtestsics.sh
|
Shell
|
mit
| 116 |
bundle exec thin stop -e production -d
|
remonbonbon/rec-stats
|
stop.sh
|
Shell
|
mit
| 39 |
./build-application.sh
tar -cjf kilns.tar.bz2 kilns doc library
|
sellout/Kilns
|
make-distribution.sh
|
Shell
|
mit
| 64 |
#!/bin/bash
echo "**********************************************************************"
echo "copying apache configuration to sites-avialable..."
sudo cp /home/khannegan/apps/website/apache2/testsite.conf /etc/apache2/sites-available/testsite.conf
echo "**********************************************************************"
echo "enabling test configuration..."
sudo a2ensite testsite
echo "**********************************************************************"
echo "restarting server..."
sudo service apache2 restart
if ! pidof apache2 > /dev/null
then
echo "ERROR: apache2 server restart FAILED!!!"
else
echo "website deployed"
fi
echo "**********************************************************************"
echo "lauching error log for tracking. Ctrl-C to resume..."
sudo tail -f /var/log/apache2/error.log -n 40
|
kevin-hannegan/vps-droplet
|
website/bin/test_reload.sh
|
Shell
|
mit
| 820 |
#!/bin/bash
# This script automatically sets the version and short version string of
# an Xcode project from the Git repository containing the project.
#
# To use this script in Xcode, add the contents to a "Run Script" build
# phase for your application target.
# NOTE: make sure you add a new value to your Info plist called "FullVersion"
#
# NOTE: if you receive an error saying 'fatal: Not a git repository', make sure |--git-dir| points
# to the root dir of your repo
#
# NOTE: if your git repo has no tags, this would fail, so make sure you create at least one tag
# (I usually create a tag '0.0.0' at the very first commit, you can do the same).
set -o errexit
set -o nounset
INFO_PLIST="${INFOPLIST_FILE}"
echo "$INFO_PLIST"
# Get git tag and hash in the FullVersion
FULL_VERSION=$(git --git-dir="${PROJECT_DIR}/.git" --work-tree="${PROJECT_DIR}/" describe --dirty | sed -e 's/^v//' -e 's/g//')
# Use the latest tag for short version (You'll have to make sure that all your tags are of the format 0.0.0,
# this is to satisfy Apple's rule that short version be three integers separated by dots)
# using git tag for version also encourages you to create tags that match your releases
SHORT_VERSION=$(git --git-dir="${PROJECT_DIR}/.git" --work-tree="${PROJECT_DIR}/" describe --abbrev=0 --tags | sed -e 's/^v//' -e 's/g//')
# I'd like to use the Git commit hash for CFBundleVersion.
# VERSION=$(git --git-dir="${PROJECT_DIR}/.git" --work-tree="${PROJECT_DIR}" rev-parse --short HEAD)
# But Apple wants this value to be a monotonically increasing integer, so
# instead use the number of commits on the master branch. If you like to
# play fast and loose with your Git history, this may cause you problems.
# Thanks to @amrox for pointing out the issue and fix.
VERSION=$(git --git-dir="${PROJECT_DIR}/.git" --work-tree="${PROJECT_DIR}/" rev-list master | wc -l)
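# Illustrative result (assuming the latest tag is "1.2.0", 5 commits ago, at
# commit abc1234): FULL_VERSION="1.2.0-5-abc1234", SHORT_VERSION="1.2.0",
# and VERSION is the number of commits on master.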
/usr/libexec/PlistBuddy -c "Set CFBundleShortVersionString $SHORT_VERSION" $INFO_PLIST
/usr/libexec/PlistBuddy -c "Set CFBundleVersion $VERSION" $INFO_PLIST
/usr/libexec/PlistBuddy -c "Set FullVersion $FULL_VERSION" $INFO_PLIST
echo "VERSION: ${VERSION}"
echo "SHORT VERSION: ${SHORT_VERSION}"
echo "FULL VERSION: ${FULL_VERSION}"
|
petester42/hockey-playoffs
|
Scripts/update-build.sh
|
Shell
|
mit
| 2,206 |
#!/bin/bash
# Script to generate the static site and publish it to master
echo "> Generate static site..."
bundle exec jekyll build
cd _site
SITE_DIR=`pwd`
cd ~/tmp/
echo "> Clone repo..."
git clone [email protected]:TechIsFun/techisfun.github.io.git
cd techisfun.github.io
echo "> Copy files..."
cp -r $SITE_DIR/* .
echo "> Commit..."
git add -A
git commit -a -m "updated static site"
echo "> Push..."
git push origin master
cd ..
echo "> Cleanup..."
rm -rf techisfun.github.io
echo "> Done"
|
TechIsFun/techisfun.github.io
|
run_publish.sh
|
Shell
|
mit
| 517 |
#!/bin/bash
# This is based on "preexec.bash" but is customized for iTerm2.
# Note: this module requires 2 bash features which you must not otherwise be
# using: the "DEBUG" trap, and the "PROMPT_COMMAND" variable. iterm2_preexec_install
# will override these and if you override one or the other this _will_ break.
# This is known to support bash3, as well as *mostly* support bash2.05b. It
# has been tested with the default shells on MacOS X 10.4 "Tiger", Ubuntu 5.10
# "Breezy Badger", Ubuntu 6.06 "Dapper Drake", and Ubuntu 6.10 "Edgy Eft".
# tmux and screen are not supported; even using the tmux hack to get escape
# codes passed through, ncurses interferes and the cursor isn't in the right
# place at the time it's passed through.
if [[ "$TERM" != screen && "$ITERM_SHELL_INTEGRATION_INSTALLED" = "" && "$-" == *i* ]]; then
ITERM_SHELL_INTEGRATION_INSTALLED=Yes
# Saved copy of your PS1. This is used to detect if the user changes PS1
# directly. ITERM_PREV_PS1 will hold the last value that this script set PS1 to
# (including various custom escape sequences).
ITERM_PREV_PS1="$PS1"
# This variable describes whether we are currently in "interactive mode";
# i.e. whether this shell has just executed a prompt and is waiting for user
# input. It documents whether the current command invoked by the trace hook is
# run interactively by the user; it's set immediately after the prompt hook,
# and unset as soon as the trace hook is run.
ITERM_PREEXEC_INTERACTIVE_MODE=""
# Default do-nothing implementation of preexec.
function preexec () {
true
}
# Default do-nothing implementation of precmd.
function precmd () {
true
}
# This function is installed as the PROMPT_COMMAND; it is invoked before each
# interactive prompt display. It sets a variable to indicate that the prompt
# was just displayed, to allow the DEBUG trap, below, to know that the next
# command is likely interactive.
function iterm2_preexec_invoke_cmd () {
# Ideally we could do this in iterm2_preexec_install but CentOS 7.2 and
# RHEL 7.2 complain about bashdb-main.inc not existing if you do that
# (issue 4160).
# *BOTH* of these options need to be set for the DEBUG trap to be invoked
# in ( ) subshells. This smells like a bug in bash to me. The null stderr
# redirections are to quiet errors on bash2.05 (i.e. OSX's default shell)
# where the options can't be set, and it's impossible to inherit the trap
# into subshells.
set -o functrace > /dev/null 2>&1
shopt -s extdebug > /dev/null 2>&1
\local s=$?
last_hist_ent="$(HISTTIMEFORMAT= builtin history 1)";
precmd;
# This is an iTerm2 addition to try to work around a problem in the
# original preexec.bash.
# When the PS1 has command substitutions, this gets invoked for each
# substitution and each command that's run within the substitution, which
# really adds up. It would be great if we could do something like this at
# the end of this script:
# PS1="$(iterm2_prompt_prefix)$PS1($iterm2_prompt_suffix)"
# and have iterm2_prompt_prefix set a global variable that tells precmd not to
# output anything and have iterm2_prompt_suffix reset that variable.
# Unfortunately, command substitutions run in subshells and can't
# communicate to the outside world.
# Instead, we have this workaround. We save the original value of PS1 in
# $ITERM_ORIG_PS1. Then each time this function is run (it's called from
# PROMPT_COMMAND just before the prompt is shown) it will change PS1 to a
# string without any command substitutions by doing eval on ITERM_ORIG_PS1. At
# this point ITERM_PREEXEC_INTERACTIVE_MODE is still the empty string, so preexec
# won't produce output for command substitutions.
# The first time this is called ITERM_ORIG_PS1 is unset. This tests if the variable
# is undefined (not just empty) and initializes it. We can't initialize this at the
# top of the script because it breaks with liquidprompt. liquidprompt wants to
# set PS1 from a PROMPT_COMMAND that runs just before us. Setting ITERM_ORIG_PS1
# at the top of the script will overwrite liquidprompt's PS1, whose value would
# never make it into ITERM_ORIG_PS1. Issue 4532. It's important to check
# if it's undefined before checking if it's empty because some users have
# bash set to error out on referencing an undefined variable.
if [ -z "${ITERM_ORIG_PS1+xxx}" ]
then
# ITERM_ORIG_PS1 always holds the last user-set value of PS1.
# You only get here on the first time iterm2_preexec_invoke_cmd is called.
export ITERM_ORIG_PS1="$PS1"
fi
if [[ "$PS1" != "$ITERM_PREV_PS1" ]]
then
export ITERM_ORIG_PS1="$PS1"
fi
# Get the value of the prompt prefix, which will change $?
\local iterm2_prompt_prefix_value="$(iterm2_prompt_prefix)"
# Reset $? to its saved value, which might be used in $ITERM_ORIG_PS1.
sh -c "exit $s"
# Set PS1 to various escape sequences, the user's preferred prompt, and more escape sequences.
export PS1="\[$iterm2_prompt_prefix_value\]$ITERM_ORIG_PS1\[$(iterm2_prompt_suffix)\]"
# Save the value we just set PS1 to so if the user changes PS1 we'll know and we can update ITERM_ORIG_PS1.
export ITERM_PREV_PS1="$PS1"
sh -c "exit $s"
# This must be the last line in this function, or else
# iterm2_preexec_invoke_exec will do its thing at the wrong time.
ITERM_PREEXEC_INTERACTIVE_MODE="yes";
}
# This function is installed as the DEBUG trap. It is invoked before each
# interactive prompt display. Its purpose is to inspect the current
# environment to attempt to detect if the current command is being invoked
# interactively, and invoke 'preexec' if so.
function iterm2_preexec_invoke_exec () {
if [ ! -t 1 ]
then
# We're in a piped subshell (STDOUT is not a TTY) like
# (echo -n A; sleep 1; echo -n B) | wc -c
# ...which should return "2".
return
fi
if [[ -n "${COMP_LINE:-}" ]]
then
# We're in the middle of a completer. This obviously can't be
# an interactively issued command.
return
fi
if [[ -z "$ITERM_PREEXEC_INTERACTIVE_MODE" ]]
then
# We're doing something related to displaying the prompt. Let the
# prompt set the title instead of me.
return
else
# If we're in a subshell, then the prompt won't be re-displayed to put
# us back into interactive mode, so let's not set the variable back.
# In other words, if you have a subshell like
# (sleep 1; sleep 2)
# You want to see the 'sleep 2' as a set_command_title as well.
if [[ 0 -eq "$BASH_SUBSHELL" ]]
then
ITERM_PREEXEC_INTERACTIVE_MODE=""
fi
fi
if [[ "iterm2_preexec_invoke_cmd" == "$BASH_COMMAND" ]]
then
# Sadly, there's no cleaner way to detect two prompts being displayed
# one after another. This makes it important that PROMPT_COMMAND
# remain set _exactly_ as below in iterm2_preexec_install. Let's switch back
# out of interactive mode and not trace any of the commands run in
# precmd.
# Given their buggy interaction between BASH_COMMAND and debug traps,
# versions of bash prior to 3.1 can't detect this at all.
ITERM_PREEXEC_INTERACTIVE_MODE=""
return
fi
# In more recent versions of bash, this could be set via the "BASH_COMMAND"
# variable, but using history here is better in some ways: for example, "ps
# auxf | less" will show up with both sides of the pipe if we use history,
# but only as "ps auxf" if not.
hist_ent="$(HISTTIMEFORMAT= builtin history 1)";
\local prev_hist_ent="${last_hist_ent}";
last_hist_ent="${hist_ent}";
if [[ "${prev_hist_ent}" != "${hist_ent}" ]]; then
\local this_command="$(echo "${hist_ent}" | sed -e "s/^[ ]*[0-9]*[ ]*//g")";
else
\local this_command="";
fi;
# If none of the previous checks have earlied out of this function, then
# the command is in fact interactive and we should invoke the user's
# preexec hook with the running command as an argument.
preexec "$this_command";
}
# Execute this to set up preexec and precmd execution.
function iterm2_preexec_install () {
# Finally, install the actual traps.
if ( [ x"${PROMPT_COMMAND:-}" = x ]); then
PROMPT_COMMAND="iterm2_preexec_invoke_cmd";
else
# If there's a trailing semicolon followed by spaces, remove it (issue 3358).
PROMPT_COMMAND="$(echo -n $PROMPT_COMMAND | sed -e 's/; *$//'); iterm2_preexec_invoke_cmd";
fi
# The $_ is ignored, but prevents it from changing (issue 3932).
trap 'iterm2_preexec_invoke_exec "$_"' DEBUG;
}
# -- begin iTerm2 customization
function iterm2_begin_osc {
printf "\033]"
}
function iterm2_end_osc {
printf "\007"
}
# Runs after interactively edited command but before execution
function preexec() {
iterm2_begin_osc
printf "133;C;"
iterm2_end_osc
# If PS1 still has the value we set it to in iterm2_preexec_invoke_cmd then
# restore it to its original value. It might have changed if you have
# another PROMPT_COMMAND (like liquidprompt) that modifies PS1.
if [ -n "${ITERM_ORIG_PS1+xxx}" -a "$PS1" = "$ITERM_PREV_PS1" ]
then
export PS1="$ITERM_ORIG_PS1"
fi
iterm2_ran_preexec="yes"
}
function precmd () {
# Work around a bug in CentOS 7.2 where preexec doesn't run if you press
# ^C while entering a command.
if [[ -z "${iterm2_ran_preexec:-}" ]]
then
preexec ""
fi
iterm2_ran_preexec=""
}
function iterm2_print_state_data() {
iterm2_begin_osc
printf "1337;RemoteHost=%s@%s" "$USER" "$iterm2_hostname"
iterm2_end_osc
iterm2_begin_osc
printf "1337;CurrentDir=%s" "$PWD"
iterm2_end_osc
iterm2_print_user_vars
}
# Usage: iterm2_set_user_var key value
function iterm2_set_user_var() {
iterm2_begin_osc
printf "1337;SetUserVar=%s=%s" "$1" $(printf "%s" "$2" | base64 | tr -d '\n')
iterm2_end_osc
}
if [ -z "$(type -t iterm2_print_user_vars)" ] || [ "$(type -t iterm2_print_user_vars)" != function ]; then
# iterm2_print_user_vars is not already defined. Provide a no-op default version.
#
# Users can write their own version of this function. It should call
# iterm2_set_user_var but not produce any other output.
function iterm2_print_user_vars() {
true
}
fi
function iterm2_prompt_prefix() {
iterm2_begin_osc
printf "133;D;\$?"
iterm2_end_osc
iterm2_print_state_data
iterm2_begin_osc
printf "133;A"
iterm2_end_osc
}
function iterm2_prompt_suffix() {
iterm2_begin_osc
printf "133;B"
iterm2_end_osc
}
function iterm2_print_version_number() {
iterm2_begin_osc
printf "1337;ShellIntegrationVersion=3;shell=bash"
iterm2_end_osc
}
# If hostname -f is slow on your system, set iterm2_hostname before sourcing this script.
if [ -z "${iterm2_hostname:-}" ]; then
iterm2_hostname=$(hostname -f)
fi
iterm2_preexec_install
# This is necessary so the first command line will have a hostname and current directory.
iterm2_print_state_data
iterm2_print_version_number
fi
|
mahi97/dotfiles
|
etc/.iterm2_shell_integration.bash
|
Shell
|
mit
| 11,709 |
################################################
SQL_PASSWORD=" " #MySQL master password #
################################################
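# Usage (illustrative): run with no arguments to install the FEMP stack,
# or pass a domain name to generate an nginx vhost for it, e.g.:
#   ./fbwc.sh example.com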
clear
if [ $# -eq 1 ]; then
echo "Building configuration for $1"
sudo mkdir -p "/var/www/$1"
echo "server {
listen 80;
server_name $1 www.$1;
root /var/www/$1;
index index.php index.html index.htm;
location / {
try_files \$uri \$uri/ =404;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/local/www/nginx-dist;
}
location ~ \.php\$ {
try_files \$uri =404;
fastcgi_split_path_info ^(.+\.php)(/.+)\$;
fastcgi_pass unix:/var/run/php-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME \$request_filename;
include fastcgi_params;
}
}
" >> "/usr/local/etc/nginx/vhosts/$1.conf"
echo "Success!"
echo "Configuration file patch for $1 is /usr/local/etc/nginx/vhosts/$1.conf"
service nginx restart
exit 0
else
clear
echo "Start installing FEMP compontents on your FreeBSD"
sudo mkdir -p /usr/local/etc/nginx/vhosts
sudo mkdir -p /var/www
sudo pkg install -y nginx mysql56-server php56 php56-mysql nano expect
echo "mysql_enable=\"YES\"
nginx_enable=\"YES\"
php_fpm_enable=\"YES\"
sendmail_enable=\"YES\"" | sudo tee -a /etc/rc.conf > /dev/null
search="listen = 127.0.0.1:9000"
replace="listen = /var/run/php-fpm.sock"
sed -i "" "s|${search}|${replace}|g" /usr/local/etc/php-fpm.conf
echo "
listen.owner = www
listen.group = www
listen.mode = 0660
" | sudo tee -a /usr/local/etc/php-fpm.conf > /dev/null
sudo cp /usr/local/etc/php.ini-production /usr/local/etc/php.ini
echo "cgi.fix_pathinfo=0" | sudo tee -a /usr/local/etc/php.ini > /dev/null
sudo service php-fpm start
sudo service mysql-server start
expect -c "
set timeout 10
spawn mysql_secure_installation
expect \"Enter current password for root (enter for none):\"
send \"\r\"
expect \"Set root password?\"
send \"Y\r\"
expect \"New password:\"
send \"$SQL_PASSWORD\r\"
expect \"Re-enter new password:\"
send \"$SQL_PASSWORD\r\"
expect \"Remove anonymous users?\"
send \"y\r\"
expect \"Disallow root login remotely?\"
send \"y\r\"
expect \"Remove test database and access to it?\"
send \"y\r\"
expect \"Reload privilege tables now?\"
send \"y\r\"
expect eof
"
sudo service mysql-server restart
sudo service nginx start
:> /usr/local/etc/nginx/nginx.conf
echo 'user www;
worker_processes 1;
error_log /var/log/nginx/error.log error;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
access_log /var/log/nginx/access.log;
sendfile on;
keepalive_timeout 65;
#virtual hosting
include /usr/local/etc/nginx/vhosts/*;
}' | sudo tee -a /usr/local/etc/nginx/nginx.conf > /dev/null
sudo mkdir -p /var/log/nginx
sudo touch /var/log/nginx/access.log
sudo touch /var/log/nginx/error.log
sudo rm /usr/local/www/nginx
sudo mkdir /usr/local/www/nginx
sudo service sendmail restart
sudo service nginx restart
echo "
All done!"
fi
|
s1lviu/FBWC
|
fbwc.sh
|
Shell
|
mit
| 3,227 |
#auto completion for git functions
__git_complete _g __git_main
__git_complete _gch _git_checkout
__git_complete _gp _git_pull
#aliases for autocompletions
alias gp="_gp"
alias gch="_gch"
# Add tab completion for navigating dotfiles functions
complete -F _cddf df
# Add tab completion for container names to docker functions
complete -o "default" -o "nospace" -W "$(docker ps --format \"{{.Names}}\")" docker docker_enter docker_enter_id docker_id docker_rm;
complete -o "default" -o "nospace" -W "$(docker images --format \"{{.ID}}\")" docker_rm_image;
|
dkoston/dotfiles
|
bash/autocomplete.sh
|
Shell
|
mit
| 556 |
#!/bin/bash
command -v jq >/dev/null 2>&1 || { echo >&2 "jq command must be available. See http://stedolan.github.io/jq/. Aborting."; exit 1; }
set -e
DEFAULT_API='api.datasift.com'
CC=${1:-$CC}
DU=${2:-$DU}
DK=${3:-$DK}
API=${4:-$DEFAULT_API}
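# Usage (illustrative; unset positional args fall back to the CC/DU/DK
# environment variables):
#   ./cli.sh <path-to-datasift-cli> <username> <api-key> [api-host]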
function ds(){
${CC} -a ${DU} ${DK} --u ${API} "$@" #| jq .
}
# core API - validate our hash, compile it, check our usage, dpu and balance
echo 'Validating CSDL'
csdl='interaction.content contains "music"'
valid=$(ds -e core -c validate -p csdl "$csdl" | jq .status)
if [ ${valid} != 200 ]; then
echo "Validating CSDL failed"
echo ${valid}
exit 1
fi
echo 'Compiling'
hash=$(ds -e core -c compile -p csdl "$csdl" | jq -r .body.hash)
echo "Compiled and got $hash"
echo 'Usage :'
ds -e core -c usage | jq .
echo 'DPU :'
ds -e core -c dpu -p hash $hash | jq .
echo 'Balance :'
ds -e core -c usage | jq .
echo 'Preparing Historic query'
end=`expr $(date +%s) - 7200`
start=`expr $end - 3600`
historic=$(ds -e historics -c prepare -p start ${start} -p end ${end} -p name "Historics CLI @ $start" -p hash ${hash})
echo ${historic} | jq .
historic_id=$(echo ${historic} | jq -r .body.id)
echo "Historic created with ID $historic_id"
echo 'Validating Push subscription'
push_v=$(ds -e push -c validate -p playback_id ${historic_id} -p name "Playback CLI @ $start" -p output_type http \
-p output_params.method post -p output_params.url 'http://ec2-50-19-63-138.compute-1.amazonaws.com:80' \
-p output_params.delivery_frequency 0 -p output_params.max_size 102400 -p output_params.auth.type none \
-p output_params.verify_ssl false -p output_params.use_gzip true)
push_status=$(echo ${push_v} | jq .status)
echo ${push_v} | jq .
if [ ${push_status} != 200 ]; then
echo "Validating Push subscription failed"
exit 1
fi
echo 'Creating Push from Historic'
push=$(ds -e push -c create -p playback_id ${historic_id} -p name "Playback CLI @ $start" -p output_type http \
-p output_params.method post -p output_params.url 'http://ec2-50-19-63-138.compute-1.amazonaws.com:80' \
-p output_params.delivery_frequency 0 -p output_params.max_size 102400 -p output_params.auth.type none \
-p output_params.verify_ssl false -p output_params.use_gzip true)
echo "Created push subscription for historic"
echo ${push} | jq .
push_id=$(echo ${push} | jq -r .body.id)
echo 'Starting Historic query'
ds -e historics -c start -p id ${historic_id} | jq .
echo 'Getting Historic status'
ds -e historics -c status -p start ${start} -p end ${end} | jq .
echo 'Getting Historics'
ds -e historics -c get -p id ${historic_id} | jq .
echo 'Updating historic'
ds -e historics -c update -p id ${historic_id} -p name "Some name @ $start - CLI" | jq .
echo 'Getting push'
ds -e push -c get -p id ${push_id} | jq .
echo 'Getting push logs'
ds -e push -c log -p id ${push_id} | jq .
echo 'Pausing push'
ds -e push -c pause -p id ${push_id} | jq .
echo 'Resuming push'
ds -e push -c resume -p id ${push_id} | jq .
echo 'Stopping Historic'
ds -e historics -c stop -p id ${historic_id} | jq .
echo 'Deleting Historic'
ds -e historics -c delete -p id ${historic_id} | jq .
echo 'Stopping push'
ds -e push -c stop -p id ${push_id} | jq .
echo 'Deleting push'
ds -e push -c delete -p id ${push_id} | jq .
#todo update push, pull
echo "Attempting to create a Historics preview"
preview=$(ds -e preview -c create -p start ${start} -p end ${end} -p hash ${hash} -p sources tumblr \
-p parameters 'interaction.author.link,targetVol,hour;interaction.type,freqDist,10')
echo ${preview} | jq .
preview_id=$(echo ${preview} | jq -r .body.id)
echo "Getting the preview we created"
ds -e preview -c get -p id ${preview_id} | jq .
echo "Creating a managed source"
source=$(ds -e managed_sources -c create -p source_type instagram -p name api \
-p auth "[{\"parameters\":{\"value\":\"$start$end\"}}]" \
-p resources '[{"parameters":{"value":"cats","type":"tag"}}]' \
-p parameters '{"comments":true,"likes":false}')
echo ${source}
source_id=$(echo ${source}| jq -r .body.id)
echo ${source_id}
echo "Starting managed source"
ds -e managed_sources -c start -p source_id ${source_id} | jq .
echo "Getting managed sources"
ds -e managed_sources -c get | jq .
echo "Getting Instagram sources"
ds -e managed_sources -c get -p source_type instagram | jq .
echo "Getting Facebook page sources"
ds -e managed_sources -c get -p source_type facebook_page | jq .
echo "Getting page 2 of instagram sources"
ds -e managed_sources -c get -p source_type instagram -p page 2 | jq .
echo "Getting source for $source_id"
ds -e managed_sources -c get -p source_id ${source_id} | jq .
echo "Getting logs for source $source_id"
ds -e managed_sources -c log -p source_id ${source_id} -p page 2 -p per_page 1 | jq .
echo "Stopping managed source"
ds -e managed_sources -c stop -p source_id ${source_id} | jq .
echo "Deleting managed source $source_id"
ds -e managed_sources -c delete -p source_id ${source_id} | jq .
echo "Submitting ODP data via batch to test source"
ds -e odp -c batch -p source_id twitter_gnip_source -p data "{\"id\":\"interaction\"}\n{\"id\":\"interaction2\"}\n"
|
datasift/datasift-java
|
src/test/resources/cli.sh
|
Shell
|
mit
| 5,123 |
#!/bin/bash
stack haddock --open haskellserver
|
maciejspychala/haskell-server
|
open_haddock.sh
|
Shell
|
mit
| 47 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/GoogleToolboxForMac/GoogleToolboxForMac.framework"
install_framework "$BUILT_PRODUCTS_DIR/Material/Material.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mixpanel-swift/Mixpanel.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/GoogleToolboxForMac/GoogleToolboxForMac.framework"
install_framework "$BUILT_PRODUCTS_DIR/Material/Material.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mixpanel-swift/Mixpanel.framework"
fi
|
yanqingsmile/ChemicalCalculator
|
Pods/Target Support Files/Pods-ChemicalCalculator/Pods-ChemicalCalculator-frameworks.sh
|
Shell
|
mit
| 3,945 |
#!/usr/bin/env bash
python3 -m venv venv
source venv/bin/activate
python -m pip install --upgrade pip
pip install external/okta-0.0.3.1-py2.py3-none-any.whl
pip install -e .
pip install -e .[test]
pip install -e .[setup]
pip uninstall -y enum34
make $BUILD_TARGET
pwd
.build/.travis/release.sh
python setup.py test
|
adorton-adobe/user-sync.py
|
.build/.travis/build-py36-centos.sh
|
Shell
|
mit
| 315 |
#!/bin/sh
(
sleep 1
echo b
sleep 1
) &
PROC=$!
echo a
wait $PROC
echo c
|
tov/shcaml
|
examples/wait.sh
|
Shell
|
mit
| 86 |
#!/bin/bash
MYDIR=$(dirname $0)
source $MYDIR/apigee-kafka-lib.sh
cd $APIGEE_KAFKA_HOME/var/logs/kafka-rest
$CONFLUENT_HOME/bin/kafka-rest-start -daemon $CONFLUENT_HOME/etc/kafka-rest/kafka-rest.properties
|
gsjurseth/ConfluentPlatformAutomation
|
bin/startKafkaRest.sh
|
Shell
|
mit
| 208 |
# standard
misc/test-scripts/benchmark.sh 20 "U48" "1 2 4 8" "./prank.sh predict -c workdef u48.ds -l SPEEDTEST"
misc/test-scripts/benchmark.sh 10 "U48" "1 2 4 8 12 16 20 24" "./prank.sh predict -c working u48.ds -l SPEEDTEST"
misc/test-scripts/benchmark.sh 5 "U48" "1 2 4 8 12 16" "./prank.sh predict -c config/working fptrain.ds -l SPEEDTEST"
misc/test-scripts/benchmark.sh 3 "U48" "12" "./prank.sh traineval -t chen11-fpocket.ds -e joined.ds -c config/working -loop 1 -l SPEEDTEST"
misc/test-scripts/benchmark.sh 1 "dt198" "1" "./prank.sh predict -c workdef dt198.ds -l SPEEDTEST"
|
rdk/p2rank
|
misc/test-scripts/standard-benchmarks.sh
|
Shell
|
mit
| 612 |
#!/bin/bash
# ----------------------
# KUDU Deployment Script
# Version: 0.1.7
# ----------------------
# Helpers
# -------
exitWithMessageOnError () {
if [ ! $? -eq 0 ]; then
echo "An error has occurred during web site deployment."
echo $1
exit 1
fi
}
# Prerequisites
# -------------
# Verify node.js installed
hash node 2>/dev/null
exitWithMessageOnError "Missing node.js executable, please install node.js, if already installed make sure it can be reached from current environment."
# Setup
# -----
SCRIPT_DIR="${BASH_SOURCE[0]%\\*}"
SCRIPT_DIR="${SCRIPT_DIR%/*}"
ARTIFACTS=$SCRIPT_DIR/../artifacts
KUDU_SYNC_CMD=${KUDU_SYNC_CMD//\"}
if [[ ! -n "$DEPLOYMENT_SOURCE" ]]; then
DEPLOYMENT_SOURCE=$SCRIPT_DIR
fi
if [[ ! -n "$NEXT_MANIFEST_PATH" ]]; then
NEXT_MANIFEST_PATH=$ARTIFACTS/manifest
if [[ ! -n "$PREVIOUS_MANIFEST_PATH" ]]; then
PREVIOUS_MANIFEST_PATH=$NEXT_MANIFEST_PATH
fi
fi
if [[ ! -n "$DEPLOYMENT_TARGET" ]]; then
DEPLOYMENT_TARGET=$ARTIFACTS/wwwroot
else
KUDU_SERVICE=true
fi
if [[ ! -n "$KUDU_SYNC_CMD" ]]; then
# Install kudu sync
echo Installing Kudu Sync
npm install kudusync -g --silent
exitWithMessageOnError "npm failed"
if [[ ! -n "$KUDU_SERVICE" ]]; then
# In case we are running locally this is the correct location of kuduSync
KUDU_SYNC_CMD=kuduSync
else
# In case we are running on kudu service this is the correct location of kuduSync
KUDU_SYNC_CMD=$APPDATA/npm/node_modules/kuduSync/bin/kuduSync
fi
fi
# Node Helpers
# ------------
selectNodeVersion () {
if [[ -n "$KUDU_SELECT_NODE_VERSION_CMD" ]]; then
SELECT_NODE_VERSION="$KUDU_SELECT_NODE_VERSION_CMD \"$DEPLOYMENT_SOURCE\" \"$DEPLOYMENT_TARGET\" \"$DEPLOYMENT_TEMP\""
eval $SELECT_NODE_VERSION
exitWithMessageOnError "select node version failed"
if [[ -e "$DEPLOYMENT_TEMP/__nodeVersion.tmp" ]]; then
NODE_EXE=`cat "$DEPLOYMENT_TEMP/__nodeVersion.tmp"`
exitWithMessageOnError "getting node version failed"
fi
if [[ -e "$DEPLOYMENT_TEMP/.tmp" ]]; then
NPM_JS_PATH=`cat "$DEPLOYMENT_TEMP/__npmVersion.tmp"`
exitWithMessageOnError "getting npm version failed"
fi
if [[ ! -n "$NODE_EXE" ]]; then
NODE_EXE=node
fi
NPM_CMD="\"$NODE_EXE\" \"$NPM_JS_PATH\""
else
NPM_CMD=npm
NODE_EXE=node
fi
}
##################################################################################################################################
# Deployment
# ----------
echo Handling node.js deployment.
# 1. KuduSync
if [[ "$IN_PLACE_DEPLOYMENT" -ne "1" ]]; then
"$KUDU_SYNC_CMD" -v 50 -f "$DEPLOYMENT_SOURCE" -t "$DEPLOYMENT_TARGET" -n "$NEXT_MANIFEST_PATH" -p "$PREVIOUS_MANIFEST_PATH" -i ".git;.hg;.deployment;deploy.sh"
exitWithMessageOnError "Kudu Sync failed"
fi
# 2. Select node version
selectNodeVersion
# 3. Install npm packages
if [ -e "$DEPLOYMENT_TARGET/package.json" ]; then
cd "$DEPLOYMENT_TARGET"
eval $NPM_CMD install --production
exitWithMessageOnError "npm failed"
cd - > /dev/null
fi
# 4. Install bower packages
if [ -e "$DEPLOYMENT_TARGET/bower.json" ]; then
cd "$DEPLOYMENT_TARGET"
eval $NPM_CMD install bower
exitWithMessageOnError "installing bower failed"
./node_modules/.bin/bower install
exitWithMessageOnError "bower failed"
cd - > /dev/null
fi
# 5. Run gulp
if [ -e "$DEPLOYMENT_TARGET/gulpfile.js" ]; then
cd "$DEPLOYMENT_TARGET"
./node_modules/gulp/bin/gulp.js scripts
exitWithMessageOnError "gulp failed"
cd - > /dev/null
fi
##################################################################################################################################
# Post deployment stub
if [[ -n "$POST_DEPLOYMENT_ACTION" ]]; then
POST_DEPLOYMENT_ACTION=${POST_DEPLOYMENT_ACTION//\"}
cd "${POST_DEPLOYMENT_ACTION_DIR%\\*}"
"$POST_DEPLOYMENT_ACTION"
exitWithMessageOnError "post deployment action failed"
fi
echo "Finished successfully."
|
leeric92/HumpbackSeahorses
|
deploy.sh
|
Shell
|
mit
| 3,932 |
mkdir -p $(rbenv root)/plugins
cd $(rbenv root)/plugins
test -d rbenv-each || git clone https://github.com/rbenv/rbenv-each.git
test -d rbenv-update || git clone https://github.com/rkh/rbenv-update.git
test -d rbenv-default-gems || git clone https://github.com/sstephenson/rbenv-default-gems.git
cat > $(rbenv root)/default-gems <<EOF
awesome_print
bundler
bundler-audit
byebug
byebug-color-printer
foreman
license_finder
lunchy
qwandry
EOF
RUBY_VERSION=2.6.5
rbenv versions | grep -q $RUBY_VERSION || rbenv install $RUBY_VERSION
rbenv global $RUBY_VERSION
|
lenn4rd/dotfiles
|
ruby/install.sh
|
Shell
|
mit
| 576 |
#!/bin/sh
set -e
set -x
scriptDir=$(dirname $0)
dockerFile=$scriptDir/Dockerfile
imageName=nginx-react-playground:latest
port=${APP_PORT:-80}
build() {
docker build -f "$dockerFile" -t "$imageName" .
}
run() {
printf "Application started. To check it open link http://localhost:%s\n\n" "$port"
docker run -p "$port":80 "$imageName"
}
case $1 in
build)
build
;;
run)
run
;;
*)
printf "\nError. unknown command: \"%s\",\naccepted: build | run\n" "$1"
exit 13
;;
esac
|
rodmax/react-playground
|
tools/nginx/nginx-docker.sh
|
Shell
|
mit
| 548 |
#!/usr/bin/env bash
cp -r ./app/static ./app/mod_home/
python run.py build;
cd ./app/build;
python -m http.server 80;
|
empiricalstateofmind/personal_website
|
build.sh
|
Shell
|
mit
| 117 |
#!/bin/sh
if echo "$1" | grep -Eq 'i[[:digit:]]86-'; then
echo x86
else
echo "$1" | grep -Eo '^[[:alnum:]_]*'
fi
|
DevilGladiator/XeonOS
|
target-triplet-to-arch.sh
|
Shell
|
mit
| 118 |
node ../../single-c-file.js SimpleTest
g++ SimpleTest_merged.cxx
./a.out
|
dwd31415/single-c-file
|
Tests/SimpleTest/build.sh
|
Shell
|
mit
| 73 |
#!/bin/bash
# Written by Nanbo Sun and CBIG under MIT license: https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
###########################################
# Usage and Reading in Parameters
###########################################
# Usage
usage() { echo "
Usage: $0 -i <imgList> -d <idList> -s <step> -t <newTemp> -c <scriptDir> \
-p <spmDir> -o <outDir> [-r <reorient>] [-q <queue>]
Given a template, run VBM in 4 stages. The 1st stage converts the raw
images to NIfTI files. In the 2nd stage, the user needs to reorient the
images until all of them have their origins at the AC, and then update
the idList. The 3rd stage runs the initial segmentation; the user should
check the segmentation results via ${outDir}/slicesdir/index.html and,
after checking, update the ${idList}. The 4th stage runs the remaining steps 7 to 12.
- imgList Text file with each line being the path to a T1 image
- idList Text file with each line being the ID to a subject. The
id list should correspond to the img list line by line.
- step Stage to run: 1, 1a, 1b, 6_2a, or 7_12
- newTemp The given template used for segmentation.
- scriptDir Directory of current script
- spmDir Directory of the SPM software
- outDir Output directory; e.g., ~/outputs/VBM/
- reorient If step = 1a, this is a single reorientation matrix.
If step = 1b, this is a list of reorientation matrix.
- queue (Optional) if you have a cluster, use it to specify the
queue to which you want to qsub these jobs; if not provided,
jobs will run serially (potentially very slow!)
" 1>&2; exit 1; }
# Reading in parameters
while getopts ":i:d:s:t:c:p:o:r:q:" opt; do
case "${opt}" in
i) imgList=${OPTARG};;
d) idList=${OPTARG};;
s) step=${OPTARG};;
t) newTemp=${OPTARG};;
c) scriptDir=${OPTARG};;
p) spmDir=${OPTARG};;
o) outDir=${OPTARG};;
r) reorient=${OPTARG};;
q) queue=${OPTARG};;
*) usage;;
esac
done
shift $((OPTIND-1))
if [ -z "${imgList}" ] || [ -z "${idList}" ] || [ -z "${step}" ] || \
[ -z "${newTemp}" ] || [ -z "${scriptDir}" ] || [ -z "${spmDir}" ] || [ -z "${outDir}" ]; then
echo Missing Parameters!
usage
fi
if [ -z "${queue}" ]; then
queue=""
fi
###########################################
# Main
###########################################
cd ${scriptDir}
if [ ${step} == "1" ]; then
###### Step 1: Convert raw image to nifti file
./CBIG_MMLDA_step1_raw2nii.sh -i ${imgList} -d ${idList} -o ${outDir} -q ${queue}
elif [ ${step} == "1a" ]; then
###### Step 1a: Reorient the T1 image
if [ ! -z ${reorient} ]; then
./CBIG_MMLDA_step1a_reorient.sh -i ${outDir} -d ${idList} -r ${reorient} \
-s ${scriptDir} -p ${spmDir} -q ${queue}
fi
elif [ ${step} == "1b" ]; then
###### Step 1b: Apply reorientation matrix to T1 images.
if [ ! -z ${reorient} ]; then
./CBIG_MMLDA_step1b_apply_reorient_matrix.sh -i ${outDir} -d ${idList} \
-r ${reorient} -s ${scriptDir} -p ${spmDir} -q ${queue}
fi
elif [ ${step} == "6_2a" ]; then
###### Step 6: Segmentation using new templates.
./CBIG_MMLDA_step6_seg_new_template.sh -i ${outDir} -d ${idList} -t ${newTemp} \
-s ${scriptDir} -p ${spmDir} -q ${queue}
###### Step 2a: Check segmentation results.
./CBIG_MMLDA_step2a_check_segment.sh -i ${outDir} -d ${idList} -s ${scriptDir} -p ${spmDir} -q ${queue}
elif [ ${step} == "7_12" ]; then
###### Step 7: Smoothing.
./CBIG_MMLDA_step7_smooth.sh -i ${outDir} -d ${idList} -s ${scriptDir} -p ${spmDir} -q ${queue}
###### Step 8: Merge and create mask.
./CBIG_MMLDA_step8_merge_create_mask.sh -i ${outDir} -d ${idList} -s ${scriptDir} -p ${spmDir} -q ${queue}
###### Step 9: Compute GM and ICV.
./CBIG_MMLDA_step9_compute_GM_ICV.sh -i ${outDir} -d ${idList} -s ${scriptDir} -p ${spmDir} -q ${queue}
###### Step 10: Downsample to MNI 2mm.
./CBIG_MMLDA_step10_downsample_2mm.sh -i ${outDir} -d ${idList} -s ${scriptDir} -p ${spmDir} -q ${queue}
###### Step 11: Smoothing 2mm.
./CBIG_MMLDA_step11_smooth_2mm.sh -i ${outDir} -d ${idList} -s ${scriptDir} -p ${spmDir} -q ${queue}
###### Step 12: Merge and create mask 2mm.
./CBIG_MMLDA_step12_merge_create_mask_2mm.sh -i ${outDir} -d ${idList} -s ${scriptDir} -p ${spmDir} -q ${queue}
fi
|
ThomasYeoLab/CBIG
|
stable_projects/disorder_subtypes/Sun2019_ADJointFactors/step1_SPM_VBM/code/CBIG_MMLDA_runVBM_givenTemp.sh
|
Shell
|
mit
| 4,617 |
cefmdd_v1 -f ./C1_CP_EFW_L1_IB__20141230_000000_20141231_000000_V170103.cef $1 $2 $3 $4
|
caa-dev-apps/cefmdd_v1
|
tests/2017-04-15__v1.0.21/t_02_allow_FILE_TIME_SPAN_in_quotes_warn/_t_01.sh
|
Shell
|
mit
| 89 |
#!/bin/sh
runclass=Client.creatEvent
classpath=./lib/*:./bin
java -cp $classpath $runclass
|
xizeroplus/matching-algorithm
|
PhSHI-Kafka-Test/creatEvent.sh
|
Shell
|
mit
| 91 |
#!/bin/sh
# success test octal
../build/examples/test19 -i 012 > tmp.out 2>&1
if cmp -s tmp.out $srcdir/test72.out; then
exit 0
else
exit 1
fi
|
mjkoo/tclap
|
tests/test72.sh
|
Shell
|
mit
| 149 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2799-1
#
# Security announcement date: 2013-11-16 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:45 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - chromium-browser:31.0.1650.57-1~deb7u1
#
# Last versions recommended by security team:
# - chromium-browser:37.0.2062.120-1~deb7u1
#
# CVE List:
# - CVE-2013-2931
# - CVE-2013-6621
# - CVE-2013-6622
# - CVE-2013-6623
# - CVE-2013-6624
# - CVE-2013-6625
# - CVE-2013-6626
# - CVE-2013-6627
# - CVE-2013-6628
# - CVE-2013-6629
# - CVE-2013-6630
# - CVE-2013-6631
# - CVE-2013-6632
# - CVE-2013-6802
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade chromium-browser=37.0.2062.120-1~deb7u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/i386/2013/DSA-2799-1.sh
|
Shell
|
mit
| 922 |
#!/usr/bin/env bash
#% 🔺 JQ_IMAGE_VERSION %% jq CLI Docker image version %% latest
if [ ! ${JQ_IMAGE_VERSION:-} ]; then
export JQ_IMAGE_VERSION=latest
fi
#% 🔺 JQ_IMAGE %% jq CLI Docker image %% stedolan/jq:${JQ_IMAGE_VERSION}
if [ ! ${JQ_IMAGE:-} ]; then
export JQ_IMAGE=stedolan/jq:${JQ_IMAGE_VERSION}
fi
#% 🔺 JQ_CMD %% Override command for jq CLI Docker container %%
if [ ! ${JQ_CMD:-} ]; then
export JQ_CMD=""
fi
jq_cli() {
printDebug "jq args: $@"
dockerRun -i ${JQ_IMAGE} ${JQ_CMD} "$@"
}
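# Illustrative use (dockerRun and printDebug are assumed to be provided
# elsewhere by harpoon's core helpers):
#   echo '{"a":1}' | jq_cli '.a'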
|
wheniwork/harpoon
|
tasks/jq/bootstrap.sh
|
Shell
|
mit
| 512 |
#!/bin/bash
./docker/ubuntu-nginx-phpfpm-redis-mysql.sh
./accon/accon.sh
./accon/download.sh
./accon/menus.sh
./accon/treebooks.sh
./accon/sample.sh
|
maemori/accon
|
release/all_release.sh
|
Shell
|
mit
| 149 |
#!/bin/sh
user="$(id -un 2>/dev/null || true)"
if [ "$user" != 'root' ]; then
echo "Error: this installer needs the ability to run commands as root."
exit 1
fi
lsb_dist="$(lsb_release -si)"
if [ $lsb_dist = "Debian1" ] || [ $lsb_dist = "Ubuntu" ]; then
apt-get update && apt-get install python-pip -y && pip install docker-compose
echo "Install success."
exit 0
else
echo "We only support Ubuntu and Debian, nstall failed. Your lsb_dist is $lsb_dist."
exit 1
fi
|
xczh/docker-registry-v2
|
install.sh
|
Shell
|
mit
| 470 |
#!/bin/bash
set -e
if [[ "$1" == apache2* ]] || [ "$1" == php-fpm ]; then
if [ -n "$MYSQL_PORT_3306_TCP" ]; then
if [ -z "$WORDPRESS_DB_HOST" ]; then
WORDPRESS_DB_HOST='mysql'
else
echo >&2 'warning: both WORDPRESS_DB_HOST and MYSQL_PORT_3306_TCP found'
echo >&2 " Connecting to WORDPRESS_DB_HOST ($WORDPRESS_DB_HOST)"
echo >&2 ' instead of the linked mysql container'
fi
fi
if [ -z "$WORDPRESS_DB_HOST" ]; then
echo >&2 'error: missing WORDPRESS_DB_HOST and MYSQL_PORT_3306_TCP environment variables'
echo >&2 ' Did you forget to --link some_mysql_container:mysql or set an external db'
echo >&2 ' with -e WORDPRESS_DB_HOST=hostname:port?'
exit 1
fi
# if we're linked to MySQL, and we're using the root user, and our linked
# container has a default "root" password set up and passed through... :)
: ${WORDPRESS_DB_USER:=root}
if [ "$WORDPRESS_DB_USER" = 'root' ]; then
: ${WORDPRESS_DB_PASSWORD:=$MYSQL_ENV_MYSQL_ROOT_PASSWORD}
fi
: ${WORDPRESS_DB_NAME:=wordpress}
if [ -z "$WORDPRESS_DB_PASSWORD" ]; then
echo >&2 'error: missing required WORDPRESS_DB_PASSWORD environment variable'
echo >&2 ' Did you forget to -e WORDPRESS_DB_PASSWORD=... ?'
echo >&2
echo >&2 ' (Also of interest might be WORDPRESS_DB_USER and WORDPRESS_DB_NAME.)'
exit 1
fi
set -x
if ! [ -e index.php -a -e wp-includes/version.php ]; then
echo >&2 "WordPress not found in $(pwd) - copying now..."
#if [ "$(ls -A)" ]; then
# echo >&2 "WARNING: $(pwd) is not empty - press Ctrl+C now if this is an error!"
# ( set -x; ls -A; sleep 10 )
#fi
{
env
id
pwd
ls -la /usr/src
ls -la /usr/src/wordpress
ls -la /var
ls -la /var/www
ls -la /var/www/html
} >&2
#tar cf - --one-file-system -C /usr/src/wordpress . | tar xf - --owner=1001
cp -rva . /usr/src/wordpress
echo >&2 "Complete! WordPress has been successfully copied to $(pwd)"
if [ ! -e .htaccess ]; then
# NOTE: The "Indexes" option is disabled in the php:apache base image
cat > .htaccess <<-'EOF'
# BEGIN WordPress
<IfModule mod_rewrite.c>
RewriteEngine On
RewriteBase /
RewriteRule ^index\.php$ - [L]
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule . /index.php [L]
</IfModule>
# END WordPress
EOF
#chown www-data:www-data .htaccess
fi
fi
# TODO handle WordPress upgrades magically in the same way, but only if wp-includes/version.php's $wp_version is less than /usr/src/wordpress/wp-includes/version.php's $wp_version
if [ ! -e wp-config.php ]; then
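# Generate wp-config.php from the sample, splicing the PHP snippet from the
# heredoc below in just above the "stop editing" marker line.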
awk '/^\/\*.*stop editing.*\*\/$/ && c == 0 { c = 1; system("cat") } { print }' wp-config-sample.php > wp-config.php <<'EOPHP'
// If we're behind a proxy server and using HTTPS, we need to alert Wordpress of that fact
// see also http://codex.wordpress.org/Administration_Over_SSL#Using_a_Reverse_Proxy
if (isset($_SERVER['HTTP_X_FORWARDED_PROTO']) && $_SERVER['HTTP_X_FORWARDED_PROTO'] === 'https') {
$_SERVER['HTTPS'] = 'on';
}
EOPHP
#chown www-data:www-data wp-config.php
fi
# see http://stackoverflow.com/a/2705678/433558
sed_escape_lhs() {
echo "$@" | sed 's/[]\/$*.^|[]/\\&/g'
}
sed_escape_rhs() {
echo "$@" | sed 's/[\/&]/\\&/g'
}
php_escape() {
php -r 'var_export((string) $argv[1]);' "$1"
}
set_config() {
key="$1"
value="$2"
regex="(['\"])$(sed_escape_lhs "$key")\2\s*,"
if [ "${key:0:1}" = '$' ]; then
regex="^(\s*)$(sed_escape_lhs "$key")\s*="
fi
sed -ri "s/($regex\s*)(['\"]).*\3/\1$(sed_escape_rhs "$(php_escape "$value")")/" wp-config.php
}
set_config 'DB_HOST' "$WORDPRESS_DB_HOST"
set_config 'DB_USER' "$WORDPRESS_DB_USER"
set_config 'DB_PASSWORD' "$WORDPRESS_DB_PASSWORD"
set_config 'DB_NAME' "$WORDPRESS_DB_NAME"
# allow any of these "Authentication Unique Keys and Salts." to be specified via
# environment variables with a "WORDPRESS_" prefix (ie, "WORDPRESS_AUTH_KEY")
UNIQUES=(
AUTH_KEY
SECURE_AUTH_KEY
LOGGED_IN_KEY
NONCE_KEY
AUTH_SALT
SECURE_AUTH_SALT
LOGGED_IN_SALT
NONCE_SALT
)
for unique in "${UNIQUES[@]}"; do
eval unique_value=\$WORDPRESS_$unique
if [ "$unique_value" ]; then
set_config "$unique" "$unique_value"
else
# if not specified, let's generate a random value
current_set="$(sed -rn "s/define\((([\'\"])$unique\2\s*,\s*)(['\"])(.*)\3\);/\4/p" wp-config.php)"
if [ "$current_set" = 'put your unique phrase here' ]; then
set_config "$unique" "$(head -c1M /dev/urandom | sha1sum | cut -d' ' -f1)"
fi
fi
done
if [ "$WORDPRESS_TABLE_PREFIX" ]; then
set_config '$table_prefix' "$WORDPRESS_TABLE_PREFIX"
fi
TERM=dumb php -- "$WORDPRESS_DB_HOST" "$WORDPRESS_DB_USER" "$WORDPRESS_DB_PASSWORD" "$WORDPRESS_DB_NAME" <<'EOPHP'
<?php
// database might not exist, so let's try creating it (just to be safe)
$stderr = fopen('php://stderr', 'w');
list($host, $port) = explode(':', $argv[1], 2);
$maxTries = 10;
do {
$mysql = new mysqli($host, $argv[2], $argv[3], '', (int)$port);
if ($mysql->connect_error) {
fwrite($stderr, "\n" . 'MySQL Connection Error: (' . $mysql->connect_errno . ') ' . $mysql->connect_error . "\n");
--$maxTries;
if ($maxTries <= 0) {
exit(1);
}
sleep(3);
}
} while ($mysql->connect_error);
if (!$mysql->query('CREATE DATABASE IF NOT EXISTS `' . $mysql->real_escape_string($argv[4]) . '`')) {
fwrite($stderr, "\n" . 'MySQL "CREATE DATABASE" Error: ' . $mysql->error . "\n");
$mysql->close();
exit(1);
}
$mysql->close();
EOPHP
fi
exec "$@"
|
caruccio/wordpress
|
apache/docker-entrypoint.sh
|
Shell
|
gpl-2.0
| 5,480 |
#!/bin/sh
# --- SDE-COPYRIGHT-NOTE-BEGIN ---
# This copyright note is auto-generated by ./scripts/Create-CopyPatch.
#
# Filename: lib/misc/showdeps.sh
# Copyright (C) 2008 The OpenSDE Project
# Copyright (C) 2004 - 2006 The T2 SDE Project
#
# More information can be found in the files COPYING and README.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License. A copy of the
# GNU General Public License can be found in the file COPYING.
# --- SDE-COPYRIGHT-NOTE-END ---
root=
usage() {
cat <<EOT
usage: $0 [-root <root>] [<pkg>]+
EOT
}
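# Example (illustrative): show the dependency tree of two packages against
# an alternate root:
#   ./showdeps.sh -root /mnt/target gcc binutils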
getdeps() {
if [ -f package/*/$1/$1.cache ]; then
grep -e "^\[DEP\]" package/*/$1/$1.cache | cut -d' ' -f2- | tr '\n' ' '
else
echo "unknown"
fi
}
getprio() {
local prio=
if [ -f package/*/$1/$1.desc ]; then
prio=`sed -n -e "s,^\[P\] . .* \(.*\),\1,p" package/*/$1/$1.desc`
fi
[ -n "$prio" ] && echo "$prio" || echo "---.---"
}
pkginstalled() {
[ -f $root/var/adm/packages/$1 ]
}
digdeps() {
local deep="$1" pkg="$2" prefix="$3"
local cache="$4" banner= dep=
[ $deep -eq 0 ] && return 0
(( deep-- ))
banner="$pkg($( getprio $pkg ))"
if pkginstalled $pkg; then
banner="$banner+"
else
banner="$banner-"
fi
echo -e "$prefix$banner"
for dep in $( getdeps $pkg ); do
if [ "$dep" == "unknown" ]; then
echo -e "$prefix$banner\tNODEPS"
elif [ -z "$(echo "$cache" | grep ":$dep:" )" ]; then
digdeps $deep $dep "$prefix$banner\t" "$cache$pkg:"
fi
done
}
while [ $# -ne 0 ]; do
case "$1" in
-root) root=$2
shift ;;
-*) echo "ERROR: Option $1 is not recognized."
usage; exit 1 ;;
*)
break;
esac
shift
done
for pkg; do
if [ -f package/*/$pkg/$pkg.desc ]; then
digdeps 2 $pkg '' ':'
else
echo "ERROR: '$pkg' not found!" 1>&2
fi
done
|
OpenSDE/opensde-nopast
|
lib/misc/showdeps.sh
|
Shell
|
gpl-2.0
| 1,881 |
#! /bin/sh
# Copyright (C) 2007-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Test to make sure the standard include order is stable.
# Report by Kent Boortz.
required=cc
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
AC_CONFIG_HEADERS([sub/config.h])
AC_CONFIG_FILES([sub/bar.h])
AC_OUTPUT
END
cat > Makefile.am << 'END'
bin_PROGRAMS = foo
foo_SOURCES = foo.c
BUILT_SOURCES = bar.h
END
mkdir sub
cat >foo.c <<'END'
#include <config.h>
#include <bar.h>
int main() { return bar (); }
END
cat >bar.h <<'END'
int bar () { return 0; }
END
cat >sub/bar.h.in <<'END'
choke me
END
$ACLOCAL
$AUTOCONF
$AUTOHEADER
$AUTOMAKE
mkdir build
cd build
../configure -C
$MAKE
cd ..
./configure -C
$MAKE
|
Starlink/automake
|
t/stdinc.sh
|
Shell
|
gpl-2.0
| 1,315 |
import subprocess
subprocess.call('./clear')
|
Augustus061193/OpenCollegeGraph
|
d2rq/test.sh
|
Shell
|
gpl-2.0
| 49 |
#!/usr/bin/env sh
# generated from catkin/cmake/template/setup.sh.in
# Sets various environment variables and sources additional environment hooks.
# It tries its best to undo changes from a previously sourced setup file.
# Supported command line options:
# --extend: skips the undoing of changes from a previously sourced setup file
# since this file is sourced either use the provided _CATKIN_SETUP_DIR
# or fall back to the destination set at configure time
: ${_CATKIN_SETUP_DIR:=/home/parallels/appprs/workspace/src/my_controller_pkg/build/devel}
_SETUP_UTIL="$_CATKIN_SETUP_DIR/_setup_util.py"
unset _CATKIN_SETUP_DIR
if [ ! -f "$_SETUP_UTIL" ]; then
echo "Missing Python script: $_SETUP_UTIL"
return 22
fi
# detect if running on Darwin platform
_UNAME=`uname -s`
_IS_DARWIN=0
if [ "$_UNAME" = "Darwin" ]; then
_IS_DARWIN=1
fi
unset _UNAME
# make sure to export all environment variables
export CMAKE_PREFIX_PATH
export CPATH
if [ $_IS_DARWIN -eq 0 ]; then
export LD_LIBRARY_PATH
else
export DYLD_LIBRARY_PATH
fi
unset _IS_DARWIN
export PATH
export PKG_CONFIG_PATH
export PYTHONPATH
# remember type of shell if not already set
if [ -z "$CATKIN_SHELL" ]; then
CATKIN_SHELL=sh
fi
# invoke Python script to generate necessary exports of environment variables
# use TMPDIR if it exists, otherwise fall back to /tmp
if [ -d "${TMPDIR}" ]; then
_TMPDIR="${TMPDIR}"
else
_TMPDIR=/tmp
fi
_SETUP_TMP=`mktemp "${_TMPDIR}/setup.sh.XXXXXXXXXX"`
unset _TMPDIR
if [ $? -ne 0 -o ! -f "$_SETUP_TMP" ]; then
echo "Could not create temporary file: $_SETUP_TMP"
return 1
fi
CATKIN_SHELL=$CATKIN_SHELL "$_SETUP_UTIL" $@ >> "$_SETUP_TMP"
_RC=$?
if [ $_RC -ne 0 ]; then
if [ $_RC -eq 2 ]; then
echo "Could not write the output of '$_SETUP_UTIL' to temporary file '$_SETUP_TMP': may be the disk if full?"
else
echo "Failed to run '\"$_SETUP_UTIL\" $@': return code $_RC"
fi
unset _RC
unset _SETUP_UTIL
rm -f "$_SETUP_TMP"
unset _SETUP_TMP
return 1
fi
unset _RC
unset _SETUP_UTIL
. "$_SETUP_TMP"
rm -f "$_SETUP_TMP"
unset _SETUP_TMP
# source all environment hooks
_i=0
while [ $_i -lt $_CATKIN_ENVIRONMENT_HOOKS_COUNT ]; do
eval _envfile=\$_CATKIN_ENVIRONMENT_HOOKS_$_i
unset _CATKIN_ENVIRONMENT_HOOKS_$_i
eval _envfile_workspace=\$_CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
unset _CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
# set workspace for environment hook
CATKIN_ENV_HOOK_WORKSPACE=$_envfile_workspace
. "$_envfile"
unset CATKIN_ENV_HOOK_WORKSPACE
_i=$((_i + 1))
done
unset _i
unset _CATKIN_ENVIRONMENT_HOOKS_COUNT
|
Oflameo/APPPRS
|
2015/workspace/src/my_controller_pkg/build/devel/setup.sh
|
Shell
|
gpl-2.0
| 2,579 |
#!/bin/sh
# Copyright (C) 2009-2022 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Silent rules: use of pre-defined variables $(AM_V_GEN) and $(AM_V_at).
# Incidentally, also check that silent rules are disabled by default.
. test-init.sh
echo AC_OUTPUT >> configure.ac
cat > Makefile.am <<'EOF'
all-local: foo
## And here's how you should do it in your own code:
foo: foo.in
$(AM_V_GEN)cp $(srcdir)/foo.in $@
$(AM_V_at)echo more >> $@
EXTRA_DIST = foo.in
CLEANFILES = foo
EOF
: >foo.in
$ACLOCAL
$AUTOMAKE --add-missing
$AUTOCONF
# Silent rules are disabled by default, since we haven't called
# "AM_SILENT_RULES([yes])" explicitly.
./configure
run_make -O
grep 'GEN ' stdout && exit 1
grep 'cp ' stdout
grep 'echo ' stdout
$MAKE clean
run_make -O V=1
grep 'GEN ' stdout && exit 1
grep 'cp ' stdout
grep 'echo ' stdout
$MAKE clean
run_make -O V=0
grep 'GEN .*foo' stdout
grep 'cp ' stdout && exit 1
grep 'echo ' stdout && exit 1
$MAKE distclean
./configure --enable-silent-rules
run_make -O
grep 'GEN .*foo' stdout
grep 'cp ' stdout && exit 1
grep 'echo ' stdout && exit 1
$MAKE clean
run_make -O V=0
grep 'GEN .*foo' stdout
grep 'cp ' stdout && exit 1
grep 'echo ' stdout && exit 1
$MAKE clean
run_make -O V=1
grep 'GEN ' stdout && exit 1
grep 'cp ' stdout
grep 'echo ' stdout
:
|
autotools-mirror/automake
|
t/silent-gen.sh
|
Shell
|
gpl-2.0
| 1,906 |
#!/bin/bash
# A simple script to comment in/out the "Host *" section of your local ssh config.
# Useful if you use a laptop to ssh both at home and at work and don't want to
# constantly swap out ProxyCommand configs or make 2 configs per server.
#
# I did some basic tests that I could think of but feel free to report any bugs.
# Was written on a Mac so who knows if there's something weird between Mac and Linux.
#
# v1.2
# Jim Bair
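# Usage: host-toggle.sh          toggles the "Host *" block in ~/.ssh/config
#        host-toggle.sh status   reports whether the block is active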
# Lock file to avoid concurrency issues if you're bundling this
# command with clusterssh.
lockfile="/tmp/$(basename $0).lock"
if [ -f "${lockfile}" ]; then
# Exit silently since we're assuming concurrency
exit 0
else
touch $lockfile
if [ $? -ne 0 ]; then
echo "Something went wrong creating our lockfile. Exiting."
exit 1
fi
fi
# Find our SSH Config
filename="$HOME/.ssh/config"
if [ ! -s $filename ]; then
echo "ERROR: Cannot find $filename - Exiting."
rm -f $lockfile
exit 1
fi
# Validate our inputs and report usage if needed.
if [ $# -gt 1 ] || { [ $# -eq 1 ] && [ "$1" != "status" ]; }; then
echo "ERROR: Usage: $(basename $0) [status]"
rm -f $lockfile
exit 1
fi
# Check the status of our config, if asked.
if [ $# -eq 1 ] && [ "$1" == 'status' ]; then
egrep -q '#Host \*' $filename
if [ $? -eq 0 ]; then
echo "Status: not active"
else
echo "Status active"
fi
rm -f $lockfile
exit 0
fi
# Store our new config into temp then move it into place later.
ourTemp=$(mktemp)
if [ $? -ne 0 ]; then
echo "ERROR: Unable to create temp file. Exiting."
rm -f $lockfile
exit 1
fi
# Let's preserve indentation in the config file
OLDIFS="$IFS"
IFS=''
# Walk line by line, either passing lines or editing lines.
# We are looking for the catch all host "Host *" to start, then
# either adding a # or removing the first character to strip the #
HOSTBLOCK=no
while read -r line
do
# Looking for the host block.
if [ "$HOSTBLOCK" == 'no' ]; then
# Found the block!
if [ -n "$(echo $line | egrep 'Host \*')" ]; then
HOSTBLOCK=yes
# Now see if we are swapping the catch all in or out
if [ -n "$(echo $line | egrep '^Host \*')" ]; then
echo -n "Disabling bastion host block..."
STYLE='out'
echo "#${line}" >> $ourTemp
else
echo -n "Enabling bastion host block..."
STYLE='in'
echo "${line:1}" >> $ourTemp
fi
else
# Not the Host block so just pass it through
echo "$line" >> $ourTemp
continue
fi
else
# If empty, we are at the end of the Host * section
if [ -z "$(echo $line)" ]; then
HOSTBLOCK=no
echo >> ${ourTemp}
continue
fi
# If here, you are in the host block AND modifying the lines
# based on the style decided above.
# Comment out lines (easy enough)
if [ "$STYLE" == 'out' ]; then
echo "#${line}" >> $ourTemp
# Otherwise, strip out the comment character.
else
# Small sanity check
if [ -z "$(echo $line | egrep '^#')" ]; then
echo "ERROR: Expected a commented out line in the host block but was surprised. Exiting." >&2
rm -f $ourTemp
exit 1
fi
echo "${line:1}" >> $ourTemp
fi
fi
done < "$filename"
# Restore the old IFS even though it probably doesn't matter
IFS="$OLDIFS"
# overwrite our config and remove our temp files
cat $ourTemp > $filename
rm -f $ourTemp $lockfile
# All done
echo 'done.'
exit 0
|
tsuehpsyde/misc
|
bash/host-toggle.sh
|
Shell
|
gpl-2.0
| 3,720 |
#!/bin/sh
# ------------------------------------------------------------------------------
# --- Add Read Groups
# ------------------------------------------------------------------------------
# Check that genome code was passed as parameter
USAGE="$0 genome_code";
if [ -z "$1" ]; then
echo "ERROR: $USAGE";
exit 1;
fi
GENOME_CODE=$1
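# Example invocation (hypothetical genome code; PICARD and IND_ID_W_PE_SE are
# assumed to be exported by the calling pipeline):
#   sh add_read_groups.sh hg19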
# Add read groups with Picard:
java -jar ${PICARD}/picard.jar AddOrReplaceReadGroups \
INPUT=results/${IND_ID_W_PE_SE}.bwa.${GENOME_CODE}.fixed.filtered.postdup.bam \
OUTPUT=results/${IND_ID_W_PE_SE}.bwa.${GENOME_CODE}.fixed.filtered.RG.bam \
RGLB=${IND_ID_W_PE_SE} \
RGPL=Illumina \
RGPU=Group1 \
RGSM=${IND_ID_W_PE_SE}
exit;
|
bergeycm/NGS-map
|
scripts/add_read_groups.sh
|
Shell
|
gpl-2.0
| 677 |
echo " "
echo " CLEANING..."
echo " "
make clean
echo "================================================================================================================="
echo " BUILDING MODULES AND COPYING THEM TO RAMDISK"
echo "================================================================================================================="
make -j4 modules
echo " "
echo " "
find . -iname '*.ko' | xargs cp -frvt ~/project-voodoo-kernel_repack_utils-7be059c/initramfs_root/lib/modules
echo " "
echo " "
echo " "
echo "================================================================================================================="
echo " BUILDING KERNEL"
echo "================================================================================================================="
make -j4
cd arch/arm/boot
ls -la
echo " "
echo " THE END "
echo " "
|
Angel666/ICS_kernel_P1000_CM_version
|
build.sh
|
Shell
|
gpl-2.0
| 864 |
#!/bin/sh
# (c) Copyright 2009 - 2010 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#--------------------------------------------------------------------------------
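# Flow: compile the Verilog core and the VHDL example design, compile the
# VHDL testbench sources plus glbl.v, elaborate with fuse, then launch the
# resulting ISim simulation in GUI mode driven by wave_isim.tcl.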
echo "Compiling Core Verilog UNISIM/Behavioral model"
vlogcomp -work work ../../../TX_SEND_FIFO.v
vhpcomp -work work ../../example_design/TX_SEND_FIFO_top.vhd
echo "Compiling Test Bench Files"
vhpcomp -work work ../fg_tb_pkg.vhd
vhpcomp -work work ../fg_tb_rng.vhd
vhpcomp -work work ../fg_tb_dgen.vhd
vhpcomp -work work ../fg_tb_dverif.vhd
vhpcomp -work work ../fg_tb_pctrl.vhd
vhpcomp -work work ../fg_tb_synth.vhd
vhpcomp -work work ../fg_tb_top.vhd
vlogcomp -work work $XILINX/verilog/src/glbl.v
fuse work.fg_tb_top work.glbl -L xilinxcorelib_ver -L unisims_ver -o fg_tb_top.exe
./fg_tb_top.exe -gui -tclbatch ./wave_isim.tcl
|
P3Stor/P3Stor
|
pcie/IP core/TX_SEND_FIFO/simulation/functional/simulate_isim.sh
|
Shell
|
gpl-2.0
| 2,902 |
#!/usr/bin/env bash
#
curl https://raw.github.com/creationix/nvm/master/install.sh | sh
[[ -s /home/vagrant/.nvm/nvm.sh ]] && . /home/vagrant/.nvm/nvm.sh
nvm install 0.10
nvm alias default 0.10
cd /vagrant/htdocs/js
npm install --optional
|
alexanderfefelov/nav
|
tools/vagrant.d/10-vagrant-node.bash
|
Shell
|
gpl-2.0
| 256 |
#!/bin/bash
python et.py --port /dev/ttyUSB0 write_flash 0x00000 firmware/0x00000.bin
|
rnplus/ESP8266-relay-board-firmware
|
flash1.sh
|
Shell
|
gpl-2.0
| 87 |
VERSION=5.1.1
NAME=gmp
DESC=
DEPENDS=
CONFLICTS=
SOURCES=(ftp://ftp.gnu.org/gnu/gmp/gmp-5.1.1.tar.bz2)
SOURCES_HASHES=(2fa018a7cd193c78494525f236d02dd6)
PATCHES=("patches/$NAME-$VERSION-seaos-all.patch")
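# prepare() and build() are callbacks for the surrounding packaging framework,
# which is assumed to export HOST_TRIPLET and INSTALL_ROOT before invoking them.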
function prepare() {
if ! [ -d $NAME-$VERSION ]; then
tar xf $NAME-$VERSION.tar.bz2
fi
}
function build() {
if ! ../src/$NAME-$VERSION/configure --prefix=/usr --host=$HOST_TRIPLET CC_FOR_BUILD=gcc CXX_FOR_BUILD=g++ --build=$(../src/$NAME-$VERSION/config.guess); then
return 1
fi
if ! make DESTDIR=$INSTALL_ROOT -j1 all install; then
return 1
fi
}
|
dbittman/sea
|
apps/porting/pack/packs/gmp/pkg.sh
|
Shell
|
gpl-2.0
| 562 |
#!/bin/sh
# Copyright (C) 2001-2010 Wormux Team.
# Copyright (C) 2010 The Mana World Development Team.
# Copyright (C) 2012 The Mana Developers
# Copyright (C) 2013 Bertram
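# Usage: ./update-copyright.sh <new-year>  (run from the top repository dir)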
new_year="$1"
copyright_notice="Bertram"
: ${new_year:?Missing parameter: year} >/dev/null
[ ! -e src ] && echo "This script must be run from the top repository dir" && exit 2
tmp_file=$(mktemp ${TMPDIR:-/tmp}/XXXXX)
# update the dates, creating the interval if it doesn't exist yet
find src/ -iname "*.cpp" -or -iname "*.h" -or -iname "*.hpp" |
xargs gsed -i "/Copyright.*$copyright_notice/ s,\(20[0-9]*\) \|\(20[0-9]*\)-20[0-9]* ,\1\2-$new_year ,"
# do a semi-automated commit check
git diff > $tmp_file
echo "The next +/- counts mentioning copyrights should match:"
grep "^[-+][^-+]" $tmp_file | sort | uniq -c
echo "If they don't, try finding the offending files with grep -rl <\$bad_line>"
rm -f "$tmp_file"
# List the source files that may be missing the copyright notice.
echo "The following files may be missing the given copyright notice."
echo "You might want to check them:"
find src/ -type f -name "*.[Cc][Pp][Pp]" -or -name "*.[Hh]" -or -name "*.[Cc]" -or -name "*.[Hh][Pp][Pp]" | xargs grep -RiL "$copyright_notice"
echo "End of $0 script."
|
grimreaper/ValyriaTear
|
update-copyright.sh
|
Shell
|
gpl-2.0
| 1,226 |
#!/bin/sh
#
#
#
# $Id$
#
#
#
# This file is part of the OpenLink Software Virtuoso Open-Source (VOS)
# project.
#
# Copyright (C) 1998-2014 OpenLink Software
#
# This project is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License, dated June 1991.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# ----------------------------------------------------------------------
# Fix issues with LOCALE
# ----------------------------------------------------------------------
LANG=C
LC_ALL=POSIX
export LANG LC_ALL
PORT=${PORT-1112}
HOST=${HOST-localhost}
DSN="$HOST:$PORT"
LOGFILE=mkdemo.output
DEMO=`pwd`
export DEMO LOGFILE
SILENT=${SILENT-0}
#==============================================================================
# Standard functions
#==============================================================================
ECHO()
{
echo "$*" | tee -a $DEMO/$LOGFILE
}
RUN()
{
echo "+ $*" >> $DEMO/$LOGFILE
STATUS=1
if test $SILENT -eq 1
then
eval $* >> $DEMO/$LOGFILE 2>/dev/null
else
eval $* >> $DEMO/$LOGFILE
fi
STATUS=$?
}
LINE()
{
ECHO "====================================================================="
}
BREAK()
{
ECHO ""
ECHO "---------------------------------------------------------------------"
ECHO ""
}
BANNER()
{
ECHO ""
LINE
ECHO "= $*"
ECHO "= " `date`
LINE
ECHO ""
}
START_SERVER()
{
timeout=60
ECHO "Starting Virtuoso DEMO server ..."
virtuoso +wait
starth=`date | cut -f 2 -d :`
starts=`date | cut -f 3 -d :|cut -f 1 -d " "`
while true
do
sleep 6
if (netstat -an | grep "$PORT" | grep LISTEN > /dev/null)
then
ECHO "Virtuoso server started"
return 0
fi
nowh=`date | cut -f 2 -d :`
nows=`date | cut -f 3 -d : | cut -f 1 -d " "`
nowh=`expr $nowh - $starth`
nows=`expr $nows - $starts`
nows=`expr $nows + $nowh \* 60`
if test $nows -ge $timeout
then
ECHO "***FAILED: Could not start Virtuoso DEMO Server within $timeout seconds"
exit 1
fi
done
}
STOP_SERVER()
{
RUN isql $DSN dba dba '"EXEC=raw_exit();"' VERBOSE=OFF PROMPT=OFF ERRORS=STDOUT
}
DO_COMMAND()
{
command=$1
uid=${2-dba}
passwd=${3-dba}
isql $DSN $uid $passwd ERRORS=stdout VERBOSE=OFF PROMPT=OFF "EXEC=$command" >> $DEMO/$LOGFILE
if test $? -ne 0
then
ECHO "***FAILED: $command"
else
ECHO "PASSED: $command"
fi
}
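# Example (as used in the main routine below): DO_COMMAND checkpoint
# uid and passwd default to dba/dba when omitted.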
CHECK_LOG()
{
passed=`grep "PASSED:" $DEMO/$LOGFILE | wc -l`
failed=`grep "\*\*\*.*FAILED:" $DEMO/$LOGFILE | wc -l`
aborted=`grep "\*\*\*.*ABORTED:" $DEMO/$LOGFILE | wc -l`
ECHO ""
LINE
ECHO "= Checking log file $LOGFILE for statistics:"
ECHO "="
ECHO "= Total number of tests PASSED : $passed"
ECHO "= Total number of tests FAILED : $failed"
ECHO "= Total number of tests ABORTED : $aborted"
LINE
ECHO ""
if (expr $failed + $aborted \> 0 > /dev/null)
then
ECHO "*** Not all tests completed successfully"
ECHO "*** Check the file $LOGFILE for more information"
fi
}
LOAD_SQL()
{
sql=$1
uid=${2-dba}
passwd=${3-dba}
RUN isql $DSN $uid $passwd ERRORS=stdout VERBOSE=OFF PROMPT=OFF $sql
if test $? -ne 0
then
ECHO "***FAILED: LOAD $sql"
else
ECHO "PASSED: LOAD $sql"
fi
}
MAKE_WS()
{
# DO_COMMAND "create user WS" dba dba
# DO_COMMAND "user_set_qualifier('WS', 'WS')" dba dba
DO_COMMAND "create user SOAP" dba dba
DO_COMMAND "user_set_qualifier('SOAP', 'WS')" dba dba
DO_COMMAND "grant select on Demo.demo.Customers to SOAP" dba dba
DO_COMMAND "grant select on Demo.demo.Orders to SOAP" dba dba
DO_COMMAND "grant select on Demo.demo.Order_Details to SOAP" dba dba
DO_COMMAND "grant select on Demo.demo.Products to SOAP" dba dba
DO_COMMAND "grant select on Demo.demo.Categories to SOAP" dba dba
}
LOAD_XML_DAV()
{
doc_col_id=$1
doc_col_parent=$2
doc_col_name=$3
doc_col_fs_path=$4
_cvs_entries='CVS/Entries'
# ECHO "CVS ENTRIES in " $HOME/docsrc$doc_col_fs_path$_cvs_entries
doc_files=`cat $HOME/docsrc$doc_col_fs_path$_cvs_entries | grep '^[^D]' | cut -f 2 -d '/'`
# ECHO "DOC FILES :" $doc_files
TMP=/tmp/isql.$$
ECHO "Building sql script for loading $doc_col_fs_path files"
echo "insert into WS.WS.SYS_DAV_COL (col_id, col_name ,col_owner, col_group, col_parent, col_cr_time, col_mod_time, col_perms) values ($doc_col_id, '$doc_col_name', 1, 1, $doc_col_parent, now(), now(), '110100100');" > $TMP
cd $HOME/docsrc/$doc_col_fs_path
for filename in $doc_files
do
echo "insert into WS.WS.SYS_DAV_RES (RES_OWNER, RES_COL, RES_TYPE, RES_CR_TIME, RES_MOD_TIME, RES_PERMS, RES_ID, RES_NAME, RES_CONTENT) values (1, $doc_col_id, http_mime_type('$filename'), now(), now(), '110100100', WS.WS.getid ('R'), '$filename', file_to_string ('$HOME/docsrc$doc_col_fs_path$filename'));" >> $TMP
done
LOAD_SQL $TMP dba dba
rm $TMP
cd $DEMO
}
LOAD_XML_DAV_SS()
{
doc_col_id=$1
doc_col_parent=$2
doc_col_name=$3
doc_col_fs_path=$4
stylesheet=$5
_cvs_entries='CVS/Entries'
# ECHO "CVS ENTRIES in " $HOME/docsrc$doc_col_fs_path$_cvs_entries
doc_files=`cat $HOME/docsrc$doc_col_fs_path$_cvs_entries | grep '^[^D]' | cut -f 2 -d '/'`
# ECHO "DOC FILES :" $doc_files
TMP=/tmp/isql.$$
ECHO "Building sql script for loading $doc_col_fs_path files"
echo "insert into WS.WS.SYS_DAV_COL (col_id, col_name ,col_owner, col_group, col_parent, col_cr_time, col_mod_time, col_perms) values ($doc_col_id, '$doc_col_name', 1, 1, $doc_col_parent, now(), now(), '110100100');" > $TMP
cd $HOME/docsrc/$doc_col_fs_path
for filename in $doc_files
do
echo "WS.WS.INSERT_RES_XSLT (1, $doc_col_id, '$filename', http_mime_type ('$filename'), file_to_string ('$HOME/docsrc$doc_col_fs_path$filename', '$stylesheet'));" >> $TMP
done
LOAD_SQL $TMP dba dba
rm $TMP
cd $DEMO
}
XSL_TRANSFORM ()
{
xml_src=$1
xsl_stylesheet=$2
dst=$3
src_path=$4
xsl_params=$5
TMP=/tmp/isql.$$
echo "SRC: $xml_src"
echo "SRCPATH: $src_path"
DO_COMMAND "WS.WS.XML_ENTITY_TO_FILE (xslt ('$xsl_stylesheet', \
xtree_doc (file_to_string ('$xml_src'), 0, \
'$src_path'), \
vector ($xsl_params)), \
'$dst');"
}
DUMP_XML_ENTITY ()
{
xml_src=$1
src_path=$2
dst=$3
TMP=/tmp/isql.$$
echo "SRC: $xml_src"
echo "DST: $dst"
echo "WS.WS.XML_ENTITY_TO_FILE (xtree_doc (file_to_string ('$xml_src'), 0,\
'$src_path'), '$dst');" > $TMP
LOAD_SQL $TMP dba $DBPWD
rm $TMP
}
#==============================================================================
# MAIN ROUTINE
#==============================================================================
LOAD_SQL $HOME/binsrc/samples/xquery/presetup.sql dba dba
LOAD_SQL $HOME/binsrc/samples/xquery/desk.sql dba dba
LOAD_SQL $HOME/binsrc/samples/xquery/metadata.sql dba dba
DO_COMMAND "delete from WS.WS.SYS_DAV_COL where col_id=120" dba dba
DO_COMMAND "insert into WS.WS.SYS_DAV_COL (col_id, col_name ,col_owner, col_group, col_parent, col_cr_time, col_mod_time, col_perms) values (120, 'xqdemo', 1, 1, 1, now(), now(), '110100100')" dba dba
files=`ls $HOME/binsrc/samples/xquery/data`
for i in $files
do
DO_COMMAND "insert into WS.WS.SYS_DAV_RES (RES_OWNER, RES_COL, RES_TYPE, RES_CR_TIME, RES_MOD_TIME, RES_PERMS, RES_ID, RES_NAME, RES_CONTENT) values (1, 120, http_mime_type('$i'), now(), now(), '110100100', WS.WS.getid ('R'), '$i', file_to_string ('$HOME/binsrc/samples/xquery/data/$i'))" dba dba
done
LOAD_SQL $HOME/binsrc/samples/xquery/postsetup.sql dba dba
DO_COMMAND checkpoint
|
v7fasttrack/virtuoso-opensource
|
binsrc/samples/xquery/files2dav.sh
|
Shell
|
gpl-2.0
| 8,249 |
#!/bin/bash
# written by jaeyong
# desc: a script that runs a click-based OpenFlow experiment
if ! [ `whoami` == "root" ]; then
echo "should be run by root"
	exit 1
fi
__CLICK=/home/yjaeyong/VisualXSwitch/userlevel/click
__SEC_CHAN=/home/netcs/packages/icc-sdn/openflow-1.0.0/secchan/ofprotocol
# desc:
# a script that automatically generates click script for given input params
# input param 1: tap mac addr
# input param 2: tap ip addr
# input param 3: dpid
# input param ~n: interfaces
generate_click_script() {
TAP_MAC_ADDR=$1;
TAP_IP_ADDR=$2;
DPID=$3;
# parse the next variable arguments which supposed to be
# interface names
for((i=4; i<=$#; i++))
do
eval "interfaces=\$interfaces\" \${$i}\""
done
echo "rc :: RConn();"
echo "dp :: Datapath( HOST_ADDR $TAP_MAC_ADDR,"
cnt=1;
for i in $interfaces
do
echo ""
sudo ifconfig $i up 0.0.0.0
macaddr=$(ifconfig $i | awk "/$i/ { print \$5}")
echo -e "\t PORT${cnt}_ADDR $macaddr,"
echo -e "\t PORT${cnt}_NAME $i,"
cnt=$(($cnt+1))
done
echo ""
echo -e "\tDPID $DPID );"
echo ""
echo "fh :: FromHost(DEVNAME tap0, DST $TAP_IP_ADDR/24, ETHER $TAP_MAC_ADDR );"
echo ""
echo "th :: ToHost(tap0);"
cnt=0;
for i in $interfaces
do
echo ""
echo "fd$cnt :: FromDevice( $i, SNIFFER false, PROMISC true, HEADROOM 34, CAPTURE LINUX );"
echo "td$cnt :: ToDevice( $i, DEBUG 0 );"
cnt=$(($cnt+1))
done
echo ""
echo "rc -> [0]dp[0] -> rc;"
echo "fh -> [1]dp[1] -> th;"
cnt=0;
offseted_cnt=2;
for i in $interfaces
do
echo "fd$cnt -> [$offseted_cnt]dp[$offseted_cnt] -> Queue(10000)-> td$cnt;"
cnt=$(($cnt+1))
offseted_cnt=$(($offseted_cnt+1))
done
}
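# Example call (mirrors the experiment body below; interface names vary):
#   generate_click_script de:ad:be:ef:00:00 192.168.0.1 1 eth0 eth1 > /tmp/tmp.click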
###############################################################################################################
# experiment body
###############################################################################################################
# first kill ofprotocols and click
sudo killall -9 ofprotocol click &> /dev/null
# launch ofprotocol
$__SEC_CHAN unix:/var/run/dp0.sock tcp:210.125.84.74:6655 &> /tmp/ofprotocol-output &
#$__SEC_CHAN unix:/var/run/dp0.sock tcp:210.125.84.74:6607 &> /tmp/ofprotocol-output &
sleep 2
generate_click_script de:ad:be:ef:00:00 192.168.0.1 1 eth3 eth5 eth1 eth0 > /tmp/tmp.click
$__CLICK /tmp/tmp.click 2>&1 1> output | tee /tmp/tmp
#gdb $__CLICK
|
JaeyongYoo/VisualXSwitch
|
papers/2012-CFI/run-experiment.sh
|
Shell
|
gpl-2.0
| 2,357 |
#! /bin/sh
# Copyright (C) 2011-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Check that remake rules work from subdirectories, even when makefiles
# are not named "Makefile".
# See also the other similar tests 'remake-subdir*.sh', and the
# related test 'aclocal5.sh'.
. test-init.sh
magic1='::MagicString::One::'
magic2='__MagicString__Two__'
debug_info ()
{
grep -i magic configure build.in build.mk sub/build.in sub/build.mk
}
cat > configure.ac <<END
AC_INIT([$me], [1.0])
AM_INIT_AUTOMAKE
AC_CONFIG_FILES([build.mk:build.in])
AC_CONFIG_FILES([sub/build.mk:sub/build.in])
AC_SUBST([MAGIC], [magic])
AC_OUTPUT
END
cat > build.am <<'END'
AM_MAKEFLAGS = -f build.mk
SUBDIRS = sub
END
mkdir sub
cat > sub/build.am <<'END'
AM_MAKEFLAGS = -f build.mk
END
$ACLOCAL
$AUTOCONF
$AUTOMAKE
./configure
ls -l # For debugging.
$MAKE -f build.mk
debug_info
$sleep
sed "s|magic|$magic1|" configure.ac > t
mv -f t configure.ac
cd sub
$MAKE -f build.mk build.mk
cd ..
debug_info
$FGREP $magic1 configure
$FGREP $magic1 build.mk
$FGREP $magic1 sub/build.mk
$sleep
cd sub
echo MAGIC = $magic2 >> build.am
$MAKE -f build.mk build.mk
cd ..
debug_info
$FGREP $magic2 sub/build.mk
$FGREP $magic2 sub/build.in
$FGREP $magic1 sub/build.in sub/build.mk && exit 1
$FGREP $magic2 build.in build.mk && exit 1
:
|
komh/automake-os2
|
t/remake-subdir2.sh
|
Shell
|
gpl-2.0
| 1,915 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_CONF=default
CND_DISTDIR=dist
TMPDIR=build/${CND_CONF}/${IMAGE_TYPE}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=dist/${CND_CONF}/${IMAGE_TYPE}/Campainha_joao-PIC12F1840.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
OUTPUT_BASENAME=Campainha_joao-PIC12F1840.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
PACKAGE_TOP_DIR=campainhajoao-pic12f1840/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/package
rm -rf ${TMPDIR}
mkdir -p ${TMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory ${TMPDIR}/campainhajoao-pic12f1840/bin
copyFileToTmpDir "${OUTPUT_PATH}" "${TMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/package/campainhajoao-pic12f1840.tar
cd ${TMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/package/campainhajoao-pic12f1840.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${TMPDIR}
|
sbalula/lets-PIC-with-C
|
Campainha_joao-PIC12F1840/nbproject/Package-default.bash
|
Shell
|
gpl-2.0
| 1,461 |
#!/usr/bin/env bash
set -euo pipefail
echo
echo "Pass \"--init admin <path-data> 10.0.2.15 4433\" to initialise client"
echo
zyn-cli \
--path-to-cert /etc/ssl/certs/zyn-test.pem \
-vv \
--remote-hostname zyn \
--debug-protocol \
--password admin \
"$@"
|
stbd/zyn
|
vm/development/files/zyn-run-cli-client.sh
|
Shell
|
gpl-2.0
| 279 |
#!/sbin/sh
# _ _ _ _ __ _
# / \ _ __ ___| |__ (_) |/ /___ _ __ _ __ ___| |
# / _ \ | '__/ __| '_ \| | ' // _ \ '__| '_ \ / _ \ |
# / ___ \| | | (__| | | | | . \ __/ | | | | | __/ |
# /_/ \_\_| \___|_| |_|_|_|\_\___|_| |_| |_|\___|_|
#
# Copyright 2015 Łukasz "JustArchi" Domeradzki
# Contact: [email protected]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# exit 0 -> All fine, we're running Lollipop+
# exit 1 -> All fine, we're running pre-Lollipop
# exit 2 -> No build.prop detected, or invalid value, assume Lollipop+
APKS="/system/app/Synapse/Synapse.apk"
if [[ -f "/system/build.prop" ]]; then
SDK="$(grep "ro.build.version.sdk" "/system/build.prop" | cut -d '=' -f 2)"
if [[ -n "$SDK" ]]; then
if [[ "$SDK" -ge 21 ]]; then
exit 0
else
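			# Pre-Lollipop layout keeps APKs directly under /system/app, so move
			# each APK out of its subdirectory and remove the now-empty directory.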
for APK in $APKS; do
APK_DIR="$(dirname "$APK")"
mv "$APK" "${APK_DIR}/../"
rm -rf "$APK_DIR"
done
exit 1
fi
else
exit 2
fi
else
exit 2
fi
exit 0
|
ea4862/ArchiKernel_cm12.1
|
archikernel/flasher/core/fixPreLollipop.sh
|
Shell
|
gpl-2.0
| 1,495 |