| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–110 | stringlengths 3–922 | stringclasses (1 value) | stringclasses (15 values) | int64 2–1.05M |
config() {
  NEW="$1"
  OLD="$(dirname "$NEW")/$(basename "$NEW" .new)"
  # If there's no config file by that name, mv it over:
  if [ ! -r "$OLD" ]; then
    mv "$NEW" "$OLD"
  elif [ "$(md5sum < "$OLD")" = "$(md5sum < "$NEW")" ]; then # toss the redundant copy
    rm "$NEW"
  fi
  # Otherwise, we leave the .new copy for the admin to consider...
}
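# Hedged sketch (not in the original doinst.sh): a package shipping several
# .new files could reuse the same helper, e.g.
#   for f in etc/sqlmap/*.new; do [ -e "$f" ] && config "$f"; done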
config etc/sqlmap.conf.new
| panosmdma/SlackOnly-SlackBuilds | network/sqlmap/doinst.sh | Shell | mit | 367 |
#!/bin/sh
curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh | sh
brew doctor
brew install cask
brew doctor
# homebrew cask updater: https://github.com/buo/homebrew-cask-upgrade
brew tap buo/cask-upgrade
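# After tapping, `brew cu` (the command the buo/cask-upgrade tap provides)
# upgrades outdated casks.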
| pkskelly/osx-install | scripts/homebrew-install.sh | Shell | mit | 235 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-1477-1
#
# Security announcement date: 2012-06-15 00:00:00 UTC
# Script generation date: 2017-01-01 21:02:39 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - apt:0.8.16~exp12ubuntu10.2
#
# Last versions recommended by the security team:
# - apt:0.8.16~exp12ubuntu10.21
#
# CVE List:
# - CVE-2012-0954
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade apt=0.8.16~exp12ubuntu10.21 -y
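# Optional verification step (not part of the generated script): confirm the
# installed version afterwards with
#   dpkg -s apt | grep '^Version'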
| Cyberwatch/cbw-security-fixes | Ubuntu_12.04_LTS/x86_64/2012/USN-1477-1.sh | Shell | mit | 627 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_CONF=default
CND_DISTDIR=dist
TMPDIR=build/${CND_CONF}/${IMAGE_TYPE}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=dist/${CND_CONF}/${IMAGE_TYPE}/GIT.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
OUTPUT_BASENAME=GIT.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
PACKAGE_TOP_DIR=git.x/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/package
rm -rf ${TMPDIR}
mkdir -p ${TMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory ${TMPDIR}/git.x/bin
copyFileToTmpDir "${OUTPUT_PATH}" "${TMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/package/git.x.tar
cd ${TMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/package/git.x.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${TMPDIR}
| royaraya16/PIC | nbproject/Package-default.bash | Shell | mit | 1,345 |
#!/bin/bash
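# The two -v mounts hand the host's output and cache directories to the
# container, and the -e variables tell the hone/ruby-builder image which Ruby
# revision to build from the SVN checkout.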
source "$(dirname "$0")/../common.sh"
docker run -v "$OUTPUT_DIR":/tmp/output -v "$CACHE_DIR":/tmp/cache -e VERSION=2.1.2 -e SVN_URL=http://svn.ruby-lang.org/repos/ruby/trunk -e RELNAME=branches/ruby_2_1@47326 -e STACK=cedar hone/ruby-builder:cedar
| hone/docker-heroku-ruby-builder | rubies/cedar/ruby-2.1r47326.sh | Shell | mit | 255 |
#!/bin/bash
ALL_ARGS=$1
RESULTS_FILE=$2
INPUT_FOLDER="$(pwd)/data" # docker -v needs an absolute host path; "./data" would be rejected
TEMP_FOLDER=$(pwd)/tmp
CACHE=$(pwd)/cache1
mkdir -p $TEMP_FOLDER
echo $ALL_ARGS
IFS=',' read -a myarray <<< "$ALL_ARGS"
b=${myarray[0]}
t=${myarray[1]}
s=${myarray[2]}
STARTTIME=$(date +%s%3N)
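# "${s/-/,}" replaces the first '-' with ',' below, presumably so the slice id
# matches the on-disk directory layout under /data/$t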
ss="${s/-/,}"
docker run -v $INPUT_FOLDER:/data \
-v $TEMP_FOLDER:/results \
-v $CACHE:/usr/local/n3unit/resources \
-i --rm n3unit -i /data/$t/$ss/$b -s $s -o /results/$b-n3unit.ttl --count &> /dev/null
ENDTIME=$(date +%s%3N)
ERRORCOUNT=`sed '/:errorCount :count/!d' $TEMP_FOLDER/$b-n3unit.ttl | sed -e 's/[^0-9]//g'`
if [ -z "$ERRORCOUNT" ]; then
ERRORCOUNT=0
fi
echo "$t,$s,$b,$(($ENDTIME - $STARTTIME)),$ERRORCOUNT" >> $RESULTS_FILE
cp $TEMP_FOLDER/$b-n3unit.ttl $INPUT_FOLDER/$t/$s/$b-n3unit.ttl
rm -f $TEMP_FOLDER/$b-n3unit.ttl
| IDLabResearch/validation-benchmark | validation-reasoning-framework/single-run.sh | Shell | mit | 799 |
# Install Homebrew
#ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
#brew tap homebrew/versions
#brew tap homebrew/dupes
#brew update
#brew upgrade
# Install packages
apps=(
ack
ag
bats
#cmake
coreutils
dockutil
ffmpeg
fasd
gifsicle
git
gnu-sed
grep
#gnu-sed --with-default-names
#grep --with-default-names
hub
httpie
imagemagick
jq
mackup
peco
psgrep
python
shellcheck
ssh-copy-id
tmux
tree
vim
wget
awscli
#aws-sam-cli
dos2unix
git-extras
)
brew install "${apps[@]}"
# Git comes with diff-highlight, but it isn't in the PATH
#ln -sf "$(brew --prefix)/share/git-core/contrib/diff-highlight/diff-highlight" /usr/local/bin/diff-highlight
| kevinold/dotfiles | install/brew.sh | Shell | mit | 834 |
#! /bin/sh
#
# Copyright 2002 Sun Microsystems, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistribution in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of Sun Microsystems, Inc. or the names of
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# This software is provided "AS IS," without a warranty of any
# kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
# WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY
# EXCLUDED. SUN AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES
# SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR
# DISTRIBUTING THE SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN
# OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR
# FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR
# PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF
# LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE SOFTWARE,
# EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
#
# You acknowledge that Software is not designed, licensed or intended
# for use in the design, construction, operation or maintenance of
# any nuclear facility.
#
# $Id: setup.sh,v 1.5.2.4 2002/01/25 20:12:23 vijaysr Exp $
if [ -z "$JAVA_HOME" ]
then
JAVACMD=`which java`
if [ -z "$JAVACMD" ]
then
echo "Cannot find JAVA. Please set your PATH."
exit 1
fi
JAVA_BINDIR=`dirname $JAVACMD`
JAVA_HOME=$JAVA_BINDIR/..
fi
if [ -z "$J2EE_HOME" ]
then
echo "Please set J2EE_HOME."
exit 1
fi
JAVACMD=$JAVA_HOME/bin/java
ANT_HOME=`[ -d ./src/lib/ant ] && echo ./src/lib/ant || echo ./lib/ant`
ANT_CLASSPATH=$JAVA_HOME/lib/tools.jar
ANT_CLASSPATH=$ANT_HOME/lib/ant.jar:$ANT_HOME/lib/parser.jar:$ANT_HOME/lib/jaxp.jar:$ANT_CLASSPATH
ANT_CLASSPATH=$J2EE_HOME/lib/j2ee.jar:$ANT_CLASSPATH
echo $ANT_CLASSPATH
$JAVACMD -classpath $ANT_CLASSPATH -Dant.home=$ANT_HOME -Dj2ee.home=$J2EE_HOME org.apache.tools.ant.Main -buildfile setup.xml "$@"
| pitpitman/GraduateWork | petstore1.3_01/setup.sh | Shell | mit | 2,499 |
#!/bin/bash
# script to insmod kernel module & initialize char device
# Borrowed from Kevin Farley with permission
make
sudo insmod lab6.ko
# capture the dynamically assigned major number from the last kernel log line
MAJOR=$(dmesg | tail -n 1 | awk '{print $5}')
if [ -z "$MAJOR" ]
then
MAJOR=$(dmesg | tail -n 1 | awk '{print $4}')
fi
#sudo rm /dev/interface
# create the device node with the major number assigned at insmod time
sudo mknod /dev/interface c "$MAJOR" 0
sudo chmod a+w /dev/interface
gcc -o test test.c
exit
| okeltw/OpSys | Lab6/run.sh | Shell | mit | 357 |
#!/bin/sh
## esxiconf_backup.sh
## Author: James White ([email protected])
## Version 0.2
##
## Description:
## Creates a backup of the ESXi host config
## Downloads the generated backup and stores it in the specified directory
## Adds unique datestamp for storing multiple backup copies
## More info about the ESXi config backup can be found here:
## http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2042141
##
if [ $# -ne 1 ] ; then
echo "usage: $0 /vmfs/volumes/datastore/folder"
exit 1
fi
# Specify backup dir via parameter to avoid editing the script directly
BACKUP_DIR=$1
# Check to make sure path is valid
cd "${BACKUP_DIR}" || echo "Backup directory provided is not accessible" exit 1
# Hostname values
HOSTNAME=$(hostname)
HOSTNAME_FQDN=$(hostname -f)
# ESXi version values
ESXI_VERSION_BASE=$(vmware -v | awk '{ print $3 }' | sed "s/\./-/g")
ESXI_VERSION_BUILD=$(vmware -v | awk '{ print $4 }')
# For the output of the backup.tgz file
DATE_TIMESTAMP=$(date +"%F_%H-%M-%S")
TGZ_FILE="configBundle_${ESXI_VERSION_BASE}-${ESXI_VERSION_BUILD}_${HOSTNAME}_${DATE_TIMESTAMP}.tgz"
echo "Syncing config..."
vim-cmd hostsvc/firmware/sync_config
echo "Generating ESXi config backup..."
CREATE_BACKUP_CMD=$(vim-cmd hostsvc/firmware/backup_config)
# We need to extract the http url and add in the set hostname of ESXi to form a valid URL for wget
BACKUP_HTTP_PATH=$(echo "${CREATE_BACKUP_CMD}"| awk '{ print $7 }' | sed "s/*/${HOSTNAME_FQDN}/g")
echo "Downloading generated ESXi config backup archive..."
if ! wget -q -O "${TGZ_FILE}" "${BACKUP_HTTP_PATH}"
then
echo "An error occurred while downloading the config backup"
exit 1
else
echo "ESXi config backup has been successfully downloaded!"
exit 0
fi
| jamesmacwhite/esxiconfig-backup | esxiconfig_backup.sh | Shell | mit | 1,769 |
#!/bin/sh
#############################################
# Created by iMatthewCM on 11/18/2019 #
#################################################################################################
# This script is not an official product of Jamf Software LLC. As such, it is provided without #
# warranty or support. By using this script, you agree that Jamf Software LLC is under no #
# obligation to support, debug, or otherwise maintain this script. Licensed under MIT. #
# #
# NAME: createDepartmentsFromFile.sh #
# DESCRIPTION: This script will read in the contents of a plain text file and create each line #
# of the file as a new department in Jamf Pro. IMPORTANT: Include an empty new line at the #
# bottom of the file, otherwise the final department will not be created! #
#################################################################################################
##############################
# Configure these variables! #
##############################
#Path to the file containing the department names to add
inputFile="/path/to/input.txt"
#Jamf Pro URL
#Do NOT use a trailing / character!
#Include ports as necessary
jamfProURL="https://myjss.jamfcloud.com"
#Token to use for authentication
token="eyJhbGciOiJIUzI1NiJ9.eyJhdXRoZW50aWNhdGVkLWFwcCI6IkdFTkVSSUMiLCJhdXRoZW50aWNhdGlvbi10eXBlIjoiSlNTIiwiZ3JvdXBzIjpbXSwic3ViamVjdC10eXBlIjoiSlNTX1VTRVJfSUQiLCJ0b2tlbi11dWlkIjoiM2Y0MjNlNjUtMDNiNS00MDA5LTk4N2EtNzljNjVhNWNkOGIxIiwibGRhcC1zZXJ2ZXItaWQiOi0xLCJzdWIiOiIxIiwiZXhwIjoxNTc0MTE0ODYyfQ.WpOcG_1F9IAnbLs5U6BN5ZDW1VUiqWns1Uux6AKpqHE"
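#Hedged aside (not from the original script): instead of pasting a long-lived
#token, newer Jamf Pro servers can mint one with basic auth, for example:
#  token=$(curl -su "user:pass" -X POST "$jamfProURL/api/v1/auth/token" | jq -r .token)
#(the endpoint path and the jq parsing are assumptions; adjust to your server version)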
#Loop through the file and create the departments
while IFS= read -r department
do
curl -s -H "Authorization: Bearer $token" -H "Content-type: application/json" "$jamfProURL"/uapi/v1/departments -X POST -d "{\"name\": \"$department\"}"
done < "$inputFile"
| iMatthewCM/Jamf-Scripts | JamfProAPI/createDepartmentsFromFile.sh | Shell | mit | 1,876 |
# http://localhost:8090/controlpanel
# factomd must be running
cd ~/testing/testing/test-plans-and-scripts/factoid-simulator/FactomTests/FactoidTest/
date +"%T"
for ((i=0; i < 1500; i++)); do
./run >> simulator-output
done
date +"%T"
# awk '/Errors/ && $2 !~ /0/' ~/testing/testing/test-plans-and-scripts/factoid-simulator/FactomTests/FactoidTest/simulator-output
| FactomProject/Testing | factoid-simulator/FactomTests/FactoidTest/run-many-times.sh | Shell | mit | 376 |
# itclConfig.sh --
#
# This shell script (for sh) is generated automatically by Itcl's
# configure script. It will create shell variables for most of
# the configuration options discovered by the configure script.
# This script is intended to be included by the configure scripts
# for Itcl extensions so that they don't have to figure this all
# out for themselves. This file does not duplicate information
# already provided by tclConfig.sh, so you may need to use that
# file in addition to this one.
#
# The information in this file is specific to a single platform.
# Itcl's version number.
itcl_VERSION='4.0.2'
ITCL_VERSION='4.0.2'
# The name of the Itcl library (may be either a .a file or a shared library):
itcl_LIB_FILE=itcl402.dll
ITCL_LIB_FILE=itcl402.dll
# String to pass to linker to pick up the Itcl library from its
# build directory.
itcl_BUILD_LIB_SPEC='-L/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/win/win32-ix86/pkgs/itcl -litcl402'
ITCL_BUILD_LIB_SPEC='-L/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/win/win32-ix86/pkgs/itcl -litcl402'
# String to pass to linker to pick up the Itcl library from its
# installed directory.
itcl_LIB_SPEC='-LC:/msys/1.0/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/out/win32-ix86/lib/itcl4.0.2 -litcl402'
ITCL_LIB_SPEC='-LC:/msys/1.0/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/out/win32-ix86/lib/itcl4.0.2 -litcl402'
# The name of the Itcl stub library (a .a file):
itcl_STUB_LIB_FILE=itclstub402.lib
ITCL_STUB_LIB_FILE=itclstub402.lib
# String to pass to linker to pick up the Itcl stub library from its
# build directory.
itcl_BUILD_STUB_LIB_SPEC='-L/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/win/win32-ix86/pkgs/itcl -litclstub402'
ITCL_BUILD_STUB_LIB_SPEC='-L/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/win/win32-ix86/pkgs/itcl -litclstub402'
# String to pass to linker to pick up the Itcl stub library from its
# installed directory.
itcl_STUB_LIB_SPEC='-LC:/msys/1.0/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/out/win32-ix86/lib/itcl4.0.2 -litclstub402'
ITCL_STUB_LIB_SPEC='-LC:/msys/1.0/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/out/win32-ix86/lib/itcl4.0.2 -litclstub402'
# String to pass to linker to pick up the Itcl stub library from its
# build directory.
itcl_BUILD_STUB_LIB_PATH='/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/win/win32-ix86/pkgs/itcl/itclstub402.lib'
ITCL_BUILD_STUB_LIB_PATH='/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/win/win32-ix86/pkgs/itcl/itclstub402.lib'
# String to pass to linker to pick up the Itcl stub library from its
# installed directory.
itcl_STUB_LIB_PATH='C:/msys/1.0/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/out/win32-ix86/lib/itcl4.0.2/itclstub402.lib'
ITCL_STUB_LIB_PATH='C:/msys/1.0/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/out/win32-ix86/lib/itcl4.0.2/itclstub402.lib'
# Location of the top-level source directories from which [incr Tcl]
# was built. This is the directory that contains generic, unix, etc.
# If [incr Tcl] was compiled in a different place than the directory
# containing the source files, this points to the location of the sources,
# not the location where [incr Tcl] was compiled.
itcl_SRC_DIR='/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/pkgs/itcl'
ITCL_SRC_DIR='/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/pkgs/itcl'
# String to pass to the compiler so that an extension can
# find installed Itcl headers.
itcl_INCLUDE_SPEC='/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/pkgs/itcl/generic'
ITCL_INCLUDE_SPEC='/home/andreask/dbn/lba/GlobalBuildArena/builds/win32-ix86/tcl/pkgs/itcl/generic'
| ArcherSys/ArcherSys | tcl/lib/itcl4.0.2/itclConfig.sh | Shell | mit | 3,781 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
else
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries
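# (otool -LX lists the binary's linked dylibs; the grep/sed pair below keeps
# only the @rpath libswift*.dylib names so each can be copied beside the framework)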
local basename
basename="$(basename "$1" | sed -E s/\\..+// && exit ${PIPESTATUS[0]})"
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/${basename}.framework/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework 'Pods-podTestLibrary1_Example/podTestLibrary1.framework'
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework 'Pods-podTestLibrary1_Example/podTestLibrary1.framework'
fi
| happyliuzh/podTestLibrary1 | Example/Pods/Target Support Files/Pods-podTestLibrary1_Example/Pods-podTestLibrary1_Example-frameworks.sh | Shell | mit | 2,636 |
#!/bin/bash
# DESCRIPTION
# Defines global settings.
# SETTINGS
# General
set -o nounset # Exit, with error message, when attempting to use an undefined variable.
set -o errexit # Abort script at first error, when a command exits with non-zero status.
set -o pipefail # Returns exit status of the last command in the pipe that returned a non-zero return value.
IFS=$'\n\t' # Defines how Bash splits words and iterates arrays. This defines newlines and tabs as delimiters.
export SYSTEM_LABEL=masterots # Placeholder for system display name.
export SYSTEM_NAME=masterots # Placeholder for system name.
export WORK_PATH=/tmp/downloads # Temporary location for processing of file downloads and installers.
# Ruby
export MRI=2.2.2
export JRUBY=jruby-9.0.0.0.pre2
# Repositories
REPO_RUBY_SETUP=v2.0.0
REPO_GO_SETUP=v0.1.2
REPO_NPM_SETUP=v0.5.0
REPO_SUBLIME_TEXT_SETUP=v2.0.0
REPO_DOTFILES=v15.0.0
# Applications
export DROPBOX_APP_NAME=Dropbox.app
export DROPBOX_APP_URL="https://www.dropbox.com/download?src=index&plat=mac"
export CLOUD_APP_NAME=CloudApp.app
export CLOUD_APP_URL="https://s3.amazonaws.com/downloads.getcloudapp.com/mac/CloudApp-3.3.0.dmg"
export KNOX_APP_NAME=Knox.app
export KNOX_APP_URL="https://d13itkw33a7sus.cloudfront.net/dist/K/Knox-2.3.1.zip"
export ITERM_APP_NAME=iTerm.app
export ITERM_APP_URL="https://iterm2.com/downloads/beta/iTerm2-2_0_0_20141103.zip"
export VIM_EXTENSION_ROOT="$HOME/.vim/bundle"
export VIM_PATHOGEN_EXTENSION_PATH="$HOME/.vim/autoload/pathogen.vim"
export VIM_PATHOGEN_EXTENSION_URL="https://raw.github.com/tpope/vim-pathogen/master/autoload/pathogen.vim"
export VIM_FUGITIVE_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-fugitive"
export VIM_FUGITIVE_EXTENSION_URL="https://github.com/tpope/vim-fugitive.git"
export VIM_UNIMPAIRED_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-unimpaired"
export VIM_UNIMPAIRED_EXTENSION_URL="https://github.com/tpope/vim-unimpaired.git"
export VIM_COMMENTARY_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-commentary"
export VIM_COMMENTARY_EXTENSION_URL="https://github.com/tpope/vim-commentary.git"
export VIM_GIT_GUTTER_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-gitgutter"
export VIM_GIT_GUTTER_EXTENSION_URL="https://github.com/airblade/vim-gitgutter.git"
export VIM_BUNDLER_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-bundler"
export VIM_BUNDLER_EXTENSION_URL="https://github.com/tpope/vim-bundler"
export VIM_RUBY_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-ruby"
export VIM_RUBY_EXTENSION_URL="git://github.com/vim-ruby/vim-ruby.git"
export VIM_TEXT_OBJECT_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-textobj-user"
export VIM_TEXT_OBJECT_EXTENSION_URL="git://github.com/kana/vim-textobj-user.git"
export VIM_TEXT_OBJECT_RUBY_BLOCK_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-textobj-rubyblock"
export VIM_TEXT_OBJECT_RUBY_BLOCK_EXTENSION_URL="git://github.com/nelstrom/vim-textobj-rubyblock.git"
export VIM_RAILS_EXTENSION_PATH="$VIM_EXTENSION_ROOT/vim-rails"
export VIM_RAILS_EXTENSION_URL="git://github.com/tpope/vim-rails.git"
export SUBLIME_TEXT_APP_NAME="Sublime Text.app"
export SUBLIME_TEXT_APP_URL="http://c758482.r82.cf2.rackcdn.com/Sublime%20Text%20Build%203083.dmg"
export SUBLIME_TEXT_EXTENSION_ROOT="$HOME/Library/Application Support/Sublime Text 3/Packages"
export GIT_GUTTER_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/GitGutter"
export GIT_GUTTER_EXTENSION_URL="git://github.com/jisaacks/GitGutter.git"
export GIST_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Gist"
export GIST_EXTENSION_URL="git://github.com/condemil/Gist.git"
export KEYMAPS_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Keymaps"
export KEYMAPS_EXTENSION_URL="git://github.com/MiroHibler/sublime-keymaps.git"
export LOCAL_HISTORY_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Local History"
export LOCAL_HISTORY_EXTENSION_URL="git://github.com/vishr/local-history.git"
export CTAGS_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/CTags"
export CTAGS_EXTENSION_URL="git://github.com/SublimeText/CTags.git"
export SIDEBAR_ENHANCEMENTS_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/SideBarEnhancements"
export SIDEBAR_ENHANCEMENTS_EXTENSION_URL="git://github.com/titoBouzout/SideBarEnhancements.git"
export ADVANCED_NEW_FILE_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/AdvancedNewFile"
export ADVANCED_NEW_FILE_EXTENSION_URL="git://github.com/skuroda/Sublime-AdvancedNewFile.git"
export MOVE_TAB_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/MoveTab"
export MOVE_TAB_EXTENSION_URL="git://github.com/SublimeText/MoveTab.git"
export APPLY_SYNTAX_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/ApplySyntax"
export APPLY_SYNTAX_EXTENSION_URL="git://github.com/facelessuser/ApplySyntax.git"
export CHANGE_QUOTES_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/SublimeChangeQuotes"
export CHANGE_QUOTES_EXTENSION_URL="git://github.com/colinta/SublimeChangeQuotes.git"
export CHANGE_QUOTES_EXTENSION_OPTIONS="--branch st2"
export BRACKET_HIGHLIGHTER_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/BracketHighlighter"
export BRACKET_HIGHLIGHTER_EXTENSION_URL="git://github.com/facelessuser/BracketHighlighter.git"
export TRAILING_SPACES_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/TrailingSpaces"
export TRAILING_SPACES_EXTENSION_URL="git://github.com/SublimeText/TrailingSpaces.git"
export COLOR_HIGHLIGHTER_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Color Highlighter"
export COLOR_HIGHLIGHTER_EXTENSION_URL="git://github.com/Monnoroch/ColorHighlighter.git"
export GUTTER_COLOR_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Gutter Color"
export GUTTER_COLOR_EXTENSION_URL="git://github.com/ggordan/GutterColor.git"
export ALIGNMENT_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Alignment"
export ALIGNMENT_EXTENSION_URL="git://github.com/wbond/sublime_alignment.git"
export WRAP_PLUS_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/WrapPlus"
export WRAP_PLUS_EXTENSION_URL="git://github.com/ehuss/Sublime-Wrap-Plus.git"
export AUTOFILENAME_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/AutoFileName"
export AUTOFILENAME_EXTENSION_URL="git://github.com/BoundInCode/AutoFileName.git"
export AUTOFILENAME_EXTENSION_OPTIONS="--branch st3"
export AUTOPREFIXER_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Autoprefixer"
export AUTOPREFIXER_EXTENSION_URL="git://github.com/sindresorhus/sublime-autoprefixer.git"
export EASY_MOTION_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/EasyMotion"
export EASY_MOTION_EXTENSION_URL="git://github.com/tednaleid/sublime-EasyMotion.git"
export EASY_MOTION_EXTENSION_OPTIONS="--branch st3"
export EMMET_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Emmet"
export EMMET_EXTENSION_URL="git://github.com/sergeche/emmet-sublime.git"
export EMMET_LIVE_STYLE_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/LiveStyle"
export EMMET_LIVE_STYLE_EXTENSION_URL="git://github.com/emmetio/livestyle-sublime.git"
export WEB_INSPECTOR_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Web Inspector"
export WEB_INSPECTOR_EXTENSION_URL="git://github.com/sokolovstas/SublimeWebInspector.git"
export LINTER_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/SublimeLinter"
export LINTER_EXTENSION_URL="git://github.com/SublimeLinter/SublimeLinter3.git"
export JSCS_LINTER_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/SublimeLinter-jscs"
export JSCS_LINTER_EXTENSION_URL="git://github.com/SublimeLinter/SublimeLinter-jscs.git"
export JSCS_FORMATTER_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/JSCS-Formatter"
export JSCS_FORMATTER_EXTENSION_URL="git://github.com/TheSavior/SublimeJSCSFormatter.git"
export RUBY_EXTRACT_METHOD_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/RubyExtractMethod"
export RUBY_EXTRACT_METHOD_EXTENSION_URL="git://github.com/pashamur/ruby-extract-method.git"
export RUBY_SLIM_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Ruby-Slim.tmbundle"
export RUBY_SLIM_EXTENSION_URL="git://github.com/slim-template/ruby-slim.tmbundle.git"
export RUBOCOP_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/RuboCop"
export RUBOCOP_EXTENSION_URL="git://github.com/pderichs/sublime_rubocop.git"
export MARKDOWN_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/sublime-markdown-extended"
export MARKDOWN_EXTENSION_URL="git://github.com/jonschlinkert/sublime-markdown-extended.git"
export SCSS_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/SCSS"
export SCSS_EXTENSION_URL="git://github.com/kuroir/SCSS.tmbundle.git"
export CSSCOMB_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/CSScomb"
export CSSCOMB_EXTENSION_URL="git://github.com/csscomb/csscomb-for-sublime.git"
export COFFEE_SCRIPT_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/CoffeeScript"
export COFFEE_SCRIPT_EXTENSION_URL="git://github.com/Xavura/CoffeeScript-Sublime-Plugin.git"
export HTML_PRETTIFY_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Sublime-HTMLPrettify"
export HTML_PRETTIFY_EXTENSION_URL="https://github.com/victorporof/Sublime-HTMLPrettify.git"
export DASH_DOC_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/DashDoc"
export DASH_DOC_EXTENSION_URL="git://github.com/farcaller/DashDoc.git"
export TABLE_EDITOR_EXTENSION_PATH="$SUBLIME_TEXT_EXTENSION_ROOT/Table Editor"
export TABLE_EDITOR_EXTENSION_URL="https://github.com/vkocubinsky/SublimeTableEditor.git"
export SUBLIME_URL_HANDLER_APP_NAME="SublHandler.app"
export SUBLIME_URL_HANDLER_APP_URL="https://github.com/downloads/asuth/subl-handler/SublHandler.zip"
export ALFRED_APP_NAME="Alfred 2.app"
export ALFRED_APP_URL="https://cachefly.alfredapp.com/Alfred_2.6_374.zip"
export TEXTEXPANDER_APP_NAME=TextExpander.app
export TEXTEXPANDER_APP_URL="http://dl.smilesoftware.com/com.smileonmymac.textexpander/TextExpander.zip"
export PATH_FINDER_APP_NAME="Path Finder.app"
export PATH_FINDER_APP_URL="http://get.cocoatech.com/PF7.zip"
export CHROME_APP_NAME="Google Chrome.app"
export CHROME_APP_URL="https://dl.google.com/chrome/mac/stable/GGRM/googlechrome.dmg"
export CHROME_EXTENSION_ROOT="$HOME/Apps/Google/Chrome/Extensions"
export RAILS_PANEL_EXTENSION_PATH="$CHROME_EXTENSION_ROOT/RailsPanel"
export RAILS_PANEL_EXTENSION_URL="git://github.com/dejan/rails_panel.git"
export EMBER_INSPECTOR_EXTENSION_PATH="$CHROME_EXTENSION_ROOT/EmberInspector"
export EMBER_INSPECTOR_EXTENSION_URL="git://github.com/tildeio/ember-extension.git"
export CHROME_LOGGER_EXTENSION_PATH="$CHROME_EXTENSION_ROOT/ChromeLogger"
export CHROME_LOGGER_EXTENSION_URL="git://github.com/ccampbell/chromelogger.git"
export FIREFOX_APP_NAME=Firefox.app
export FIREFOX_APP_URL="https://download-installer.cdn.mozilla.net/pub/firefox/releases/36.0.1/mac/en-US/Firefox%2036.0.1.dmg"
export OPERA_APP_NAME=Opera.app
export OPERA_APP_URL="http://get.geo.opera.com.global.prod.fastly.net/pub/opera/desktop/28.0.1750.48/mac/Opera_28.0.1750.48_Setup.dmg"
export OMNIGRAFFLE_APP_NAME="OmniGraffle Professional 5.app"
export OMNIGRAFFLE_APP_URL="http://downloads2.omnigroup.com/software/MacOSX/10.6/OmniGrafflePro-5.4.4.dmg"
export TRANSMIT_APP_NAME=Transmit.app
export TRANSMIT_APP_URL="https://www.panic.com/transmit/d/Transmit%204.4.8.zip"
export ISTATS_APP_NAME="iStat Menus.app"
export ISTATS_APP_URL="http://download.bjango.com/istatmenus"
export BARTENDER_APP_NAME=Bartender.app
export BARTENDER_APP_URL="http://www.macbartender.com/Demo/Bartender.zip"
export SNIPPETS_APP_NAME=Snippets.app
export SNIPPETS_APP_URL="http://www.snippetsapp.com/download/Snippets-1.4.3.zip"
export ACORN_APP_NAME=Acorn.app
export ACORN_APP_URL="http://flyingmeat.com/download/Acorn.zip"
export DOUBLETAKE_APP_NAME=DoubleTake.app
export DOUBLETAKE_APP_URL="http://echoone.com/doubletake/DoubleTake.dmg"
export IMAGE_OPTIM_APP_NAME=ImageOptim.app
export IMAGE_OPTIM_APP_URL="http://imageoptim.com/ImageOptim.tbz2"
export ICONJAR_APP_NAME=IconJar.app
export ICONJAR_APP_URL="https://dl0tgz6ee3upo.cloudfront.net/production/app/builds/004/513/556/original/af0d3fb01dedfcd18215d3413e658d56/IconJar.app.zip"
export VLC_APP_NAME=VLC.app
export VLC_APP_URL="http://get.videolan.org/vlc/2.2.0/macosx/vlc-2.2.0.dmg"
export PG_ADMIN_APP_NAME=pgAdmin3.app
export PG_ADMIN_APP_URL="https://ftp.postgresql.org/pub/pgadmin3/release/v1.20.0/osx/pgadmin3-1.20.0.dmg"
export PSEQUEL_APP_NAME=PSequel.app
export PSEQUEL_APP_URL="http://www.psequel.com/download"
export SEQUEL_PRO_APP_NAME="Sequel Pro.app"
export SEQUEL_PRO_APP_URL="https://sequel-pro.googlecode.com/files/sequel-pro-1.0.2.dmg"
export CHEATSHEET_APP_NAME=CheatSheet.app
export CHEATSHEET_APP_URL="http://mediaatelier.com/CheatSheet/CheatSheet_1.2.2.zip"
export OPEN_OFFICE_APP_NAME=OpenOffice.app
export OPEN_OFFICE_APP_URL="http://sourceforge.net/projects/openofficeorg.mirror/files/4.1.1/binaries/en-US/Apache_OpenOffice_4.1.1_MacOS_x86-64_install_en-US.dmg/download"
export CLOAK_APP_NAME=Cloak.app
export CLOAK_APP_URL="https://s3.amazonaws.com/static.getcloak.com/osx/updates/Release/Cloak-2.0.11.dmg"
export SCREENHERO_APP_NAME=Screenhero.app
export SCREENHERO_APP_URL="http://dl.screenhero.com/update/screenhero/Screenhero.dmg"
export SPEAK_APP_NAME=Speak.app
export SPEAK_APP_URL="https://s3.amazonaws.com/speak-production-releases/darwin/install-speak.dmg"
export VIRTUAL_BOX_APP_NAME=VirtualBox.app
export VIRTUAL_BOX_APP_URL="http://download.virtualbox.org/virtualbox/4.3.26/VirtualBox-4.3.26-98988-OSX.dmg"
export DOXIE_APP_NAME=Doxie.app
export DOXIE_APP_URL="http://www.getdoxie.com/resources/files/download_current_mac.php"
export SONOS_APP_NAME=Sonos.app
export SONOS_APP_URL="http://www.sonos.com/redir/controller_software_mac"
export APP_CLEANER_APP_NAME=AppCleaner.app
export APP_CLEANER_APP_URL="http://www.freemacsoft.net/downloads/AppCleaner_2.3.zip"
export HAZEL_APP_NAME=Hazel.prefPane
export HAZEL_APP_URL="http://www.noodlesoft.com/Products/Hazel/download"
export TRAILER_APP_NAME=Trailer.app
export TRAILER_APP_URL="http://ptsochantaris.github.io/trailer/trailer130.zip"
export CARBON_COPY_CLONER_APP_NAME="Carbon Copy Cloner.app"
export CARBON_COPY_CLONER_APP_URL="http://c74b26775831609a3cf2-8064f6cbda3d6f2abd1c53b8bc16b17e.r26.cf5.rackcdn.com/ccc-4.0.6.4022.zip"
export QUICK_LOOK_PLAIN_TEXT_APP_NAME="QLStephen.qlgenerator"
export QUICK_LOOK_PLAIN_TEXT_APP_URL="https://github.com/downloads/whomwah/qlstephen/QLStephen.qlgenerator.zip"
export KSDIFF_APP_NAME="ksdiff"
export KSDIFF_APP_URL="http://cdn.kaleidoscopeapp.com/releases/ksdiff-122.zip"
| masterots/osx_setup | settings/settings.sh | Shell | mit | 14,149 |
#!/usr/bin/env bash
usage() {
cat <<HERE
usage: git branch-extras [-m|--mergable|mergable] <branch>
# ask whether <branch> can be merged into current branch or not.
or: git branch-extras [-l|--list] [--all|--remote|--local]
# show branches which are filtered.
or: git branch-extras [-c|--current|current]
# show current branch name.
or: git branch-extras [-e|--exists|exists] <branch>
# ask whether <branch> exists or not.
or: git branch-extras [-h|--help]
# show me :)
HERE
}
mktemp() {
command mktemp 2>/dev/null || command mktemp -t tmp
}
error() {
local msg
for msg in "$@"; do
echo "$msg" 1>&2
done
exit 1
}
require_one_or_more() {
if [ -z "$2" ] || [[ "$2" =~ ^-.* ]]; then
error "'$1' requires one argument"
fi
}
list() {
list::all
}
list::local() {
git branch --list
}
list::remote() {
git branch --list -r # for listing mode, just in case.
}
list::all() {
git branch --list -a # for listing mode, just in case.
}
current() {
git rev-parse --abbrev-ref HEAD
}
tracked() {
git rev-parse --abbrev-ref $(current)@{upstream}
}
exists() {
local -r name="$1"
[ "_${name}" = "_$(git branch --list "${name}"|sed -e 's/\*//g' -e 's/ //g')" ]
}
mergable() {
local -r source_branch="$1"
local -r current_branch=$(current)
local -r temp_file=$(mktemp)
trap "rm -f ${temp_file}" 1 2 3 15
git format-patch "${current_branch}..${source_branch}" --stdout > "${temp_file}"
[ -s "${temp_file}" ] && git apply "${temp_file}" --check
}
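# How the check above works: format-patch emits only the commits that are on
# <branch> but not on the current branch, and `git apply --check` dry-runs that
# patch, so a zero exit status means the branch would merge cleanly.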
## opt-parse
EXECUTION_COMMAND=
EXECUTION_COMMAND_SUFFIX=
case "${1:--l}" in
'-c' | '--current' | 'current' )
EXECUTION_COMMAND="current"
;;
'-m' | '--mergable' | 'mergable' )
EXECUTION_COMMAND="mergable"
;;
'-e' | '--exists' | 'exists' ) # add ignore
EXECUTION_COMMAND="exists"
require_one_or_more "$1" "$2"
;;
'-l' | '--list' ) # list ignores
EXECUTION_COMMAND="list"
;;
'--all' ) # list ignores
EXECUTION_COMMAND_SUFFIX="::all"
;;
'--local' ) # list ignores
EXECUTION_COMMAND_SUFFIX="::local"
;;
'--remote' ) # list ignores
EXECUTION_COMMAND_SUFFIX="::remote"
;;
'-h' | '--help' ) # list ignores
EXECUTION_COMMAND="usage"
;;
-*) # unregistered options
error "Unknown option '$1'"
;;
*) # arguments which is not option
error "Unknown arguments '$1'"
;;
esac
shift 1
eval "${EXECUTION_COMMAND:-usage}${EXECUTION_COMMAND_SUFFIX:-} $@"
| jmatsu/git-subcommands | src/git-branch-extras.sh | Shell | mit | 2,496 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-1705-1
#
# Security announcement date: 2013-01-28 00:00:00 UTC
# Script generation date: 2017-01-01 21:03:07 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - libavformat53:4:0.8.5-0ubuntu0.12.04.1
# - libavcodec53:4:0.8.5-0ubuntu0.12.04.1
#
# Last versions recommended by the security team:
# - libavformat53:4:0.8.17-0ubuntu0.12.04.2
# - libavcodec53:4:0.8.17-0ubuntu0.12.04.2
#
# CVE List:
# - CVE-2012-2783
# - CVE-2012-2791
# - CVE-2012-2797
# - CVE-2012-2798
# - CVE-2012-2801
# - CVE-2012-2802
# - CVE-2012-2803
# - CVE-2012-2804
# - CVE-2012-5144
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libavformat53=4:0.8.17-0ubuntu0.12.04.2 -y
sudo apt-get install --only-upgrade libavcodec53=4:0.8.17-0ubuntu0.12.04.2 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_12.04_LTS/x86_64/2013/USN-1705-1.sh | Shell | mit | 1,157 |
#!/usr/bin/env bash
source init/welcome.sh
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.osx` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
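# (`sudo -n true` silently refreshes the sudo timestamp; `kill -0 "$$"` makes
# the background loop exit once this script's process has gone away)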
source developer/developer.sh
#load user config
source applications/applications.sh
#install specified apps
echo -e 'Installing your Applications\n'
for app in "${apps[@]}"
do
echo "Now installing $app"
brew cask install "$app"
done
#install specified Mac App Store apps
echo -e 'Installing your Mac App Store Applications\n'
for masapp in "${masapps[@]}"
do
echo "Now installing $masapp"
# assumption: entries in masapps are App Store app IDs, which `mas` installs;
# `brew cask` cannot install Mac App Store apps
mas install "$masapp"
done
echo "Restart into Recovery mode and enable SIP without debug for XtraFinder to work and restart the script."
#sleep 5
source system/system.sh
echo "Your Mac will now restart in 30 seconds for the changes to take effect."
sleep 30
sudo shutdown -r now
| ptbobolakis/dotfiles | alpha.sh | Shell | mit | 910 |
#!/bin/bash
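# Runs a registry:2 container as a local mirror on port 5000; the bind mount
# keeps the image data on the host, and --restart=always brings the container
# back after daemon restarts or reboots.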
docker run -d -p 5000:5000 --restart=always -v /var/lib/docker/images:/var/lib/registry --name registry registry:2
| tobegit3hub/dockerhub-mirror | setup-docker-distribution.sh | Shell | mit | 128 |
#!/usr/bin/bash
set -o errexit -o noclobber -o noglob -o nounset -o pipefail
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${SCRIPTDIR}/variables.sh"
perm_group "${PKGBUILDPATH}"
perm_group "${CONFIGPATH}"
perm_user "${SRCDEST}"
perm_user "${LOGPATH}"
perm_user "${GNUPGHOME}"
perm_custom "${PKGDEST}" "${UGID}" "${UGID}" 'u=rwX,g=rX,o=rX'
perm_root "${PKGCACHE}"
| bfritz/nfnty-dockerfiles | containers/builder/scripts/test.sh | Shell | mit | 392 |
#!/bin/bash
set -ex
if [[ $JAPR_WHEEL == "1" ]]; then
if [[ $JAPR_OS == "Linux" ]]; then
docker run --rm -u `id -u` -w /io -v `pwd`:/io quay.io/pypa/manylinux1_x86_64 /opt/python/$PYTHON_TAG/bin/python setup.py bdist_wheel
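# auditwheel repair bundles the wheel's external shared-library dependencies
# and retags it as manylinux1; the repaired wheel is written to wheelhouse/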
docker run --rm -u `id -u` -w /io -v `pwd`:/io quay.io/pypa/manylinux1_x86_64 auditwheel repair dist/*-$PYTHON_TAG-linux_x86_64.whl
rm -r dist/*
cp wheelhouse/*.whl dist
fi
if [[ $JAPR_OS == "Darwin" ]]; then
python setup.py bdist_wheel
fi
ls -lha dist
unzip -l dist/*.whl
twine upload -u squeaky dist/*.whl
fi
| squeaky-pl/japronto | misc/travis/script.sh | Shell | mit | 572 |
#!/usr/bin/env bash
#backup.sh, by Richard Willis <[email protected]>
### ADJUST VALUE BELOW ###
# Location of config files
export HOME=/root
### LEAVE THE REST UNTOUCHED ###
function load_config {
local configfile="$HOME/.backupcfg"
if [ ! -e "$configfile" ]; then
echo "Error: backup config file does not exist at location: $configfile"
exit 1
fi
source "$configfile"
if [ -z "$ftpbackupdir" ] || [ -z "$localbackupdir" ] || [ -z "$s3bucket" ] || [ -z "$zfspool" ] || [ -z "$ftpuser" ]; then
echo "Invalid config!"
exit 1
fi
}
# Sync local files with s3
function backup_s3 {
# if it's Sunday
local today=`date +%w`
#if [ $today = 0 ]; then
echo -n "Backing up $localbackupdir to s3..."
s3cmd sync --delete-removed "$localbackupdir" "s3://$s3bucket/"
if [ $? -ne 0 ]; then
echo "s3cmd sync failed!"
exit 1
fi
echo -e "done.\n"
#fi
}
# Remove old backups (FTP copies after 2 days, local copies after 7 days)
function clean_backups {
echo -n "Cleaning up old backups at $ftpbackupdir. Removing files older than 2 days..."
find "$ftpbackupdir" -type f -mtime +2 -exec rm -f {} \;
echo "done."
echo -n "Cleaning up old backups at $localbackupdir. Removing files older than 7 days..."
find "$localbackupdir" -type f -mtime +7 -exec rm -f {} \;
echo -e "done.\n"
}
# Umount the backup drive
function unmount_backup_drive {
echo -n "Unmounting FTP backup drive..."
umount "$ftpbackupdir"
echo -e "done.\n"
}
# Create an archived zfs snapshot of a lxc container
function backup_zfs {
local container="$1"
local timestamp=$(date "+%Y-%m-%d")
local snapshot="$zfspool/$container@$timestamp"
local localbackupfile="$localbackupdir/lxc/$container@$timestamp.gz"
local ftpbackupfile="$ftpbackupdir/lxc/$container@$timestamp.gz"
echo -n "Creating zfs snapshot: $snapshot..."
zfs snapshot "$snapshot"
if [ $? -ne 0 ]; then
echo "Unable to create snapshot!"
exit 1
fi
echo "done."
if [ -e "$localbackupfile" ]; then
echo "Backup file already exists: $localbackupfile"
else
echo -n "Creating data backup at location: $localbackupfile..."
zfs send "$snapshot" | gzip > "$localbackupfile"
if [ $? -ne 0 ]; then
echo "Unable to create data backup!"
exit 1
fi
echo "done."
fi
echo -n "Copying $localbackupfile to $ftpbackupfile..."
if [ -e "$ftpbackupfile" ]; then
echo -n "already exists, skipping..."
else
cp "$localbackupfile" "$ftpbackupfile"
fi
echo "done."
echo -n "Destroying snapshot: $snapshot..."
zfs destroy "$snapshot"
if [ $? -ne 0 ]; then
echo "Unable to destroy snapshot!"
exit 1
fi
echo -e "done.\n"
}
function backup_containers {
echo -e "Backing up ZFS container snapshots to FTP drive mounted at $ftpbackupdir and local dir at $localbackupdir...\n"
for container in $(lxc-ls)
do
backup_zfs "$container"
done
}
# Backup rootfs stuff
function backup_host {
echo "Backing up host files to $ftpbackupdir..."
tar -zcf "$ftpbackupdir"/etc.tar.gz /etc
tar -zcf "$ftpbackupdir"/root.tar.gz /root
tar -zcf "$ftpbackupdir"/home.tar.gz /home
echo "Done"
echo "Backing up host files to $localbackupdir..."
tar -zcf "$localbackupdir"/etc.tar.gz /etc
tar -zcf "$localbackupdir"/root.tar.gz /root
tar -zcf "$localbackupdir"/home.tar.gz /home
echo "Done"
}
# Mount the FTP backup drive
function mount_backup_drive {
local ismounted=$(df -h | grep "$ftpuser")
if [ -z "$ismounted" ]; then
echo -n "Attempting to mount FTP backup directory..."
sshfs -o idmap=user "$ftpuser@$ftpuser.your-backup.de:lxc/" "$ftpbackupdir"
if [ $? -ne 0 ]; then
echo "FTP backup directory mount failed!"
exit 1
fi
echo -e "done.\n"
fi
}
function show_backup_size {
local ftpsize=$(du -sh "$ftpbackupdir")
local localsize=$(du -sh "$localbackupdir")
echo -e "\nTotal FTP backup size: $ftpsize"
echo -e "Total local backup size: $localsize\n"
}
function show_tree {
tree -n "$ftpbackupdir" #no color output
tree -n "$localbackupdir" #no color output
}
function main {
local timestamp=$(date)
local user=$(whoami)
echo "Creating backup for $timestamp on $HOSTNAME."
echo -e "Running backup script as $user.\n"
load_config
mount_backup_drive
clean_backups
backup_containers
backup_host
show_backup_size
show_tree
unmount_backup_drive
backup_s3
echo -e "\nAll tasks completed successfully!\n"
}
main
| badsyntax/server-tools | backup/bin/backup.sh | Shell | mit | 4,299 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2528-1
#
# Security announcement date: 2012-08-14 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:26 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - icedove:3.0.11-1+squeeze12
#
# Last versions recommended by the security team:
# - icedove:3.0.11-1+squeeze12
#
# CVE List:
# - CVE-2012-1948
# - CVE-2012-1950
# - CVE-2012-1954
# - CVE-2012-1967
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade icedove=3.0.11-1+squeeze12 -y
| Cyberwatch/cbw-security-fixes | Debian_6_(Squeeze)/x86_64/2012/DSA-2528-1.sh | Shell | mit | 687 |
#!/usr/bin/env bash
THIS_DIR="$(dirname "$(readlink -f "$0")")"
source "${THIS_DIR}/common.sh"
spark-submit --master local --class interretis.intro.WordCount "${ARTIFACT_PATH}" "$@"
| MarekDudek/spark-certification | src/main/scripts/word-count.sh | Shell | mit | 170 |
#!/bin/sh
set -e
echo "Instalando homebrew"
if test ! $(which brew)
then
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
else
echo "Esta instalado en" `which brew`
fi
echo "Terminado"
| opengraphix/misdotfiles | scripts/bin/install-homebrew.sh | Shell | mit | 238 |
#!/bin/sh
# The script must run from its own directory, so cd there first:
cd "${0%/*}"
echo "running coveralls script from:"
pwd
# Piping directly from node was only sending one line for some reason
# so I create a file and then cat it which works
node ./node_modules/istanbul-harmony/lib/cli.js cover ./node_modules/mocha/bin/_mocha test/controllers test/models
cat coverage/lcov.info | node ./node_modules/coveralls/bin/coveralls.js
# always exit success, build should not depend on coveralls
exit 0
| coltonw/revonarchy | coveralls.sh | Shell | mit | 489 |
#!/usr/bin/env bash
set -e # Abort on error
set -u # Abort on uninitialized variable usage
PROJECT_ROOT=$(git rev-parse --show-toplevel)
# Kill programs running on the required ports
fuser -k 8080/tcp || true # Maps
fuser -k 8082/tcp || true # Gallery
fuser -k 4444/tcp || true # Selenium server
| spiegelm/xd-testing | scripts/kill_applications.sh | Shell | mit | 299 |
#!/bin/sh -e
#
# Copyright (c) 2009-2015 Robert Nelson <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
DIR=$PWD
CORES=$(getconf _NPROCESSORS_ONLN)
mkdir -p "${DIR}/deploy/"
patch_kernel () {
cd "${DIR}/KERNEL" || exit
export DIR
/bin/sh -e "${DIR}/patch.sh" || { git add . ; exit 1 ; }
if [ ! "${RUN_BISECT}" ] ; then
git add --all
git commit --allow-empty -a -m "${KERNEL_TAG}-${BUILD} patchset"
fi
cd "${DIR}/" || exit
}
copy_defconfig () {
cd "${DIR}/KERNEL" || exit
make ARCH=${KERNEL_ARCH} CROSS_COMPILE="${CC}" distclean
make ARCH=${KERNEL_ARCH} CROSS_COMPILE="${CC}" "${config}"
cp -v .config "${DIR}/patches/ref_${config}"
cp -v "${DIR}/patches/defconfig" .config
cd "${DIR}/" || exit
}
make_menuconfig () {
cd "${DIR}/KERNEL" || exit
make ARCH=${KERNEL_ARCH} CROSS_COMPILE="${CC}" menuconfig
cp -v .config "${DIR}/patches/defconfig"
cd "${DIR}/" || exit
}
make_kernel () {
if [ "x${KERNEL_ARCH}" = "xarm" ] ; then
image="zImage"
else
image="Image"
fi
unset address
##uImage, if you really really want a uImage, zreladdr needs to be defined on the build line going forward...
##make sure to install your distro's version of mkimage
#image="uImage"
#address="LOADADDR=${ZRELADDR}"
cd "${DIR}/KERNEL" || exit
echo "-----------------------------"
echo "make -j${CORES} ARCH=${KERNEL_ARCH} LOCALVERSION=-${BUILD} CROSS_COMPILE=\"${CC}\" ${address} ${image} modules"
echo "-----------------------------"
make -j${CORES} ARCH=${KERNEL_ARCH} LOCALVERSION=-${BUILD} CROSS_COMPILE="${CC}" ${address} ${image} modules
echo "-----------------------------"
if grep -q dtbs "${DIR}/KERNEL/arch/${KERNEL_ARCH}/Makefile"; then
echo "make -j${CORES} ARCH=${KERNEL_ARCH} LOCALVERSION=-${BUILD} CROSS_COMPILE=\"${CC}\" dtbs"
echo "-----------------------------"
make -j${CORES} ARCH=${KERNEL_ARCH} LOCALVERSION=-${BUILD} CROSS_COMPILE="${CC}" dtbs
echo "-----------------------------"
fi
KERNEL_UTS=$(cat "${DIR}/KERNEL/include/generated/utsrelease.h" | awk '{print $3}' | sed 's/\"//g' )
if [ -f "${DIR}/deploy/${KERNEL_UTS}.${image}" ] ; then
rm -rf "${DIR}/deploy/${KERNEL_UTS}.${image}" || true
rm -rf "${DIR}/deploy/config-${KERNEL_UTS}" || true
fi
if [ -f ./arch/${KERNEL_ARCH}/boot/${image} ] ; then
cp -v arch/${KERNEL_ARCH}/boot/${image} "${DIR}/deploy/${KERNEL_UTS}.${image}"
cp -v .config "${DIR}/deploy/config-${KERNEL_UTS}"
fi
cd "${DIR}/" || exit
if [ ! -f "${DIR}/deploy/${KERNEL_UTS}.${image}" ] ; then
export ERROR_MSG="File Generation Failure: [${KERNEL_UTS}.${image}]"
/bin/sh -e "${DIR}/scripts/error.sh" && { exit 1 ; }
else
ls -lh "${DIR}/deploy/${KERNEL_UTS}.${image}"
fi
}
make_pkg () {
cd "${DIR}/KERNEL" || exit
deployfile="-${pkg}.tar.gz"
tar_options="--create --gzip --file"
if [ -f "${DIR}/deploy/${KERNEL_UTS}${deployfile}" ] ; then
rm -rf "${DIR}/deploy/${KERNEL_UTS}${deployfile}" || true
fi
if [ -d "${DIR}/deploy/tmp" ] ; then
rm -rf "${DIR}/deploy/tmp" || true
fi
mkdir -p "${DIR}/deploy/tmp"
echo "-----------------------------"
echo "Building ${pkg} archive..."
case "${pkg}" in
modules)
make -s ARCH=${KERNEL_ARCH} LOCALVERSION=-${BUILD} CROSS_COMPILE="${CC}" modules_install INSTALL_MOD_PATH="${DIR}/deploy/tmp"
;;
firmware)
make -s ARCH=${KERNEL_ARCH} LOCALVERSION=-${BUILD} CROSS_COMPILE="${CC}" firmware_install INSTALL_FW_PATH="${DIR}/deploy/tmp"
;;
dtbs)
if grep -q dtbs_install "${DIR}/KERNEL/arch/${KERNEL_ARCH}/Makefile"; then
make -s ARCH=${KERNEL_ARCH} LOCALVERSION=-${BUILD} CROSS_COMPILE="${CC}" dtbs_install INSTALL_DTBS_PATH="${DIR}/deploy/tmp"
else
find ./arch/${KERNEL_ARCH}/boot/ -iname "*.dtb" -exec cp -v '{}' "${DIR}/deploy/tmp/" \;
fi
;;
esac
echo "Compressing ${KERNEL_UTS}${deployfile}..."
cd "${DIR}/deploy/tmp" || true
tar ${tar_options} "../${KERNEL_UTS}${deployfile}" ./*
cd "${DIR}/" || exit
rm -rf "${DIR}/deploy/tmp" || true
if [ ! -f "${DIR}/deploy/${KERNEL_UTS}${deployfile}" ] ; then
export ERROR_MSG="File Generation Failure: [${KERNEL_UTS}${deployfile}]"
/bin/sh -e "${DIR}/scripts/error.sh" && { exit 1 ; }
else
ls -lh "${DIR}/deploy/${KERNEL_UTS}${deployfile}"
fi
}
make_modules_pkg () {
pkg="modules"
make_pkg
}
make_firmware_pkg () {
pkg="firmware"
make_pkg
}
make_dtbs_pkg () {
pkg="dtbs"
make_pkg
}
/bin/sh -e "${DIR}/tools/host_det.sh" || { exit 1 ; }
if [ ! -f "${DIR}/system.sh" ] ; then
cp -v "${DIR}/system.sh.sample" "${DIR}/system.sh"
fi
unset CC
unset LINUX_GIT
. "${DIR}/system.sh"
/bin/sh -e "${DIR}/scripts/gcc.sh" || { exit 1 ; }
. "${DIR}/.CC"
echo "CROSS_COMPILE=${CC}"
if [ -f /usr/bin/ccache ] ; then
echo "ccache [enabled]"
CC="ccache ${CC}"
fi
. "${DIR}/version.sh"
export LINUX_GIT
unset FULL_REBUILD
#FULL_REBUILD=1
if [ "${FULL_REBUILD}" ] ; then
/bin/sh -e "${DIR}/scripts/git.sh" || { exit 1 ; }
if [ "${RUN_BISECT}" ] ; then
/bin/sh -e "${DIR}/scripts/bisect.sh" || { exit 1 ; }
fi
patch_kernel
copy_defconfig
fi
if [ ! "${AUTO_BUILD}" ] ; then
make_menuconfig
fi
make_kernel
make_modules_pkg
make_firmware_pkg
if grep -q dtbs "${DIR}/KERNEL/arch/${KERNEL_ARCH}/Makefile"; then
make_dtbs_pkg
fi
echo "-----------------------------"
echo "Script Complete"
echo "${KERNEL_UTS}" > kernel_version
echo "eewiki.net: [user@localhost:~$ export kernel_version=${KERNEL_UTS}]"
echo "-----------------------------"
| robert-budde/ti-linux-kernel-dev | tools/rebuild.sh | Shell | mit | 6,415 |
#!/usr/bin/env bash
# `usage` is referenced below but was never defined; provide it before use
usage() {
  echo "usage: $0 [dev]" >&2
  exit 1
}
profile=${1:-dev}
echo "profile: $profile"
case "$profile" in
dev)
database=modus
host=localhost
username=$(whoami)
password=
lein_profile=dev
;;
*)
usage
;;
esac
export PGPASSWORD=$password
echo "select pg_terminate_backend(pg_stat_activity.pid) from pg_stat_activity where pg_stat_activity.datname = '$database'" | psql --host $host --username $username postgres
dropdb --host $host --username $username $database
createdb --host $host --username $username $database
lein with-profile $lein_profile flyway migrate
| C63/modus-clj | bootstrap.sh | Shell | epl-1.0 | 575 |
#!/bin/sh
cp konfig.adb.clj konfig.clj
cp proba-konfig.adb.clj proba-konfig.clj
| lnmnd/magnet.zer | before_script.sh | Shell | epl-1.0 | 81 |
#!/bin/bash
set -x
clip_IDs=({001..050})
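# ({001..050} brace expansion yields the zero-padded ids 001 002 ... 050)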
echo ${clip_IDs[*]}
version_IDs=({001..016})
echo ${version_IDs[*]}
object_ID="distractor"
echo ${object_ID}
petavision_dir="/mnt/data/repo/neovision-programs-petavision/Heli/Training/"
echo ${petavision_dir}
list_dir=${petavision_dir}"list_canny/"
echo ${list_dir}
object_dir=${list_dir}${object_ID}"/"
echo ${object_dir[0]}
if [[ ! -d "${object_dir[0]}" ]]; then
mkdir ${object_dir[0]}
fi
for version_ID in ${version_IDs[*]}
do
echo ${version_ID}
object_file=${object_dir[0]}${object_ID}"_"${version_ID}"_fileOfFilenames.txt"
echo ${object_file[0]}
for clip_ID in ${clip_IDs[*]}
do
# echo ${clip_ID}
clip_list=${list_dir}${clip_ID}"/"${clip_ID}"_"${version_ID}"_fileOfFilenames.txt"
# echo ${clip_list}
cat ${clip_list} >> ${object_file}
done # clip_ID
done # version_ID
| dpaiton/OpenPV | pv-core/mlab/NeoVis2/combineFileOfFilenames.sh | Shell | epl-1.0 | 854 |
#!/bin/bash
. base.sh
#verify the minimum number of arguments
[ $# -lt 5 ] && { printError "<b>Error:</b> Fill in all fields."; exit 2; }
# variable assignment
login=$1
senha=$2
grupo=$3
docs=$4
shift 4
users="$*"
checkLogin
[ $needjoin -eq 1 ] && sudo net ads join -U$login%$senha -W $domain &> /dev/null
# check whether the group to be created already exists,
# printing an error message and ending the script
# if it does.
[ $(getent group $grupo) ] && { printError "<b>Error:</b> Group <b>$grupo</b> already exists."; exit 9; }
[ $docs -eq 0 ] && path="/rede/grupos" || path="/rede/grupos/DocsScaneados"
# add the group
net rpc group add $grupo -S $servidor -W $domain -U$login%$senha &> .tmp$$
# if the return code is OK, print a success message;
# otherwise show the error on screen
[ $? -eq 0 ] && {
printOK "Group <b>$grupo</b> created successfully"
} || printError "Error: $(cat .tmp$$)";
rm .tmp$$
# add users to the group
./grupos.sh -L $login -S $senha -G "$grupo" -U "$users"
# directory creation and permission setup on the file server.
# the server must hold the keys of the user running the script
# create the directory and set permissions
ssh [email protected] "mkdir $path/${grupo^^};"
# special permissions for DocsScaneados
[ $docs -eq 1 ] && ssh [email protected] "setfacl -R -m g:docsscaneados:rwx,u:impressorastype:rwx $path/${grupo^^};"
# permissions common to everyone ($idgrupo was never set; the group name is
# used directly, matching the retry loop below)
ssh [email protected] "setfacl -R -m o:---,g:admingrupos:rwx,g:$grupo:rwx $path/${grupo^^}; chmod -R 2770 $path/${grupo^^};"
#[ $? -ne 0 ] && idgrupo=$(ssh [email protected] "getent group | grep ^${grupo}:x | cut -f3 -d:")
# loop to work around an AD propagation failure
while [ ! $(ssh [email protected] "getfacl $path/${grupo^^} | grep $grupo:rwx") ];
do
ssh [email protected] "setfacl -R -m o:---,g:admingrupos:rwx,g:$grupo:rwx $path/${grupo^^}; chmod -R 2770 $path/${grupo^^};"
done;
[ $? -eq 0 ] && {
printOK "Share <b>$path/${grupo^^}</b> created successfully.<br>All users must log in again"
} || printError "Communication failure";
| atcasanova/ADonLinux | ad/criagrupo.sh | Shell | gpl-2.0 | 2,085 |
#!/bin/bash
if [ "$1" == "" ]; then
echo "Usage ./rp-emulated.sh <backup folder name>"
exit
fi
SIM_TIME=1
CODE_DIR="/var/tmp/ln"
BACKUP_FOLDER="/var/tmp/ln_result/backup/from_cade/$1"
echo "$BACKUP_FOLDER"
RESULT="$CODE_DIR/results/emulated"
echo "***removing old *.txt files in the result directory...."
rm $RESULT/*.txt
rm $RESULT/*.dat
rm $RESULT/*.svg
echo "***running emulated UDP ..."
./waf --run "scratch/emulated --sim_time=$SIM_TIME --is_tcp=0" > $RESULT/UDP_LOG 2>&1
echo "***running emulated TCP ..."
./waf --run "scratch/emulated --sim_time=$SIM_TIME --is_tcp=1" > $RESULT/TCP_LOG 2>&1
echo "***making the backingup folder $BACKUP_FOLDER ..."
mkdir $BACKUP_FOLDER
echo "***plotting ..."
cd $CODE_DIR/results/
pwd
./dr-emulated.sh
echo "copying results to $BACKUP_FOLDER ..."
cp -r $RESULT/ $BACKUP_FOLDER
cp $CODE_DIR/scratch/emulated.cc $BACKUP_FOLDER
cp $CODE_DIR/emulated-in.txt $BACKUP_FOLDER
cp $CODE_DIR/emulated-out.txt $BACKUP_FOLDER
| binhqnguyen/ln | rp-emulated.sh | Shell | gpl-2.0 | 971 |
#!/bin/sh
# check the extra config files exist
if [ -f "/etc/asterisk/sip-conf.conf" ]
then
echo "Found /etc/asterisk/sip-conf.conf"
else
echo "; extra config options" > /etc/asterisk/sip-conf.conf
echo "nat=force_rport,comedia" >> /etc/asterisk/sip-conf.conf
echo "registerattempts=0" >> /etc/asterisk/sip-conf.conf
/bin/chmod 0666 /etc/asterisk/sip-conf.conf
/bin/chown asterisk:asterisk /etc/asterisk/sip-conf.conf
fi
if [ -f "/etc/asterisk/sip-register.conf" ]
then
echo "Found /etc/asterisk/sip-register.conf"
else
echo "; SIP Registered Trunks" > /etc/asterisk/sip-register.conf
/bin/chmod 0666 /etc/asterisk/sip-register.conf
/bin/chown asterisk:asterisk /etc/asterisk/sip-register.conf
fi
if [ -f "/etc/asterisk/sip-trunks.conf" ]
then
echo "Found /etc/asterisk/sip-trunks.conf"
else
echo "; SIP IP Authenticated Trunks" > /etc/asterisk/sip-trunks.conf
/bin/chmod 0666 /etc/asterisk/sip-trunks.conf
/bin/chown asterisk:asterisk /etc/asterisk/sip-trunks.conf
fi
HOST_IP=$(ifconfig | awk -F':' '/inet addr/&&!/127.0.0.1/{split($2,_," ");print _[1]}')
arr=$(echo $HOST_IP | tr " " "\n")
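# take the first non-loopback IPv4 address reported by ifconfig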
for x in $arr
do
ASTERISK_IP=$x
break
done
echo "[general]" > /etc/asterisk/sip.conf
echo "context=public" >> /etc/asterisk/sip.conf
echo "allowoverlap=no" >> /etc/asterisk/sip.conf
echo "udpbindaddr=$ASTERISK_IP" >> /etc/asterisk/sip.conf
echo "tcpenable=no" >> /etc/asterisk/sip.conf
echo "tcpbindaddr=0.0.0.0" >> /etc/asterisk/sip.conf
echo "transport=udp" >> /etc/asterisk/sip.conf
echo "realm=telecube.com.au" >> /etc/asterisk/sip.conf
echo "srvlookup=yes" >> /etc/asterisk/sip.conf
echo "maxexpiry=240" >> /etc/asterisk/sip.conf
echo "minexpiry=120" >> /etc/asterisk/sip.conf
echo "defaultexpiry=180" >> /etc/asterisk/sip.conf
echo "language=au" >> /etc/asterisk/sip.conf
echo "sendrpid = pai" >> /etc/asterisk/sip.conf
echo "useragent=Asterisk (Telecube) PBX" >> /etc/asterisk/sip.conf
echo "callcounter = yes" >> /etc/asterisk/sip.conf
echo "directmedia=no" >> /etc/asterisk/sip.conf
echo "sdpsession=Asterisk (Telecube) PBX" >> /etc/asterisk/sip.conf
echo "rtcachefriends=yes" >> /etc/asterisk/sip.conf
echo "rtsavesysname=yes" >> /etc/asterisk/sip.conf
echo "alwaysauthreject=yes" >> /etc/asterisk/sip.conf
echo "progressinband=yes" >> /etc/asterisk/sip.conf
echo "" >> /etc/asterisk/sip.conf
echo "" >> /etc/asterisk/sip.conf
echo '#include "sip-conf.conf"' >> /etc/asterisk/sip.conf
echo "" >> /etc/asterisk/sip.conf
echo '#include "sip-register.conf"' >> /etc/asterisk/sip.conf
echo "" >> /etc/asterisk/sip.conf
echo '#include "sip-trunks.conf"' >> /etc/asterisk/sip.conf
echo "" >> /etc/asterisk/sip.conf
/bin/chmod 0666 /etc/asterisk/sip.conf
/bin/chown asterisk:asterisk /etc/asterisk/sip.conf
# reload sip.conf
/usr/sbin/asterisk -rx "sip reload"
|
telecube/telecube-pbx
|
updates/sys/update-15.sh
|
Shell
|
gpl-2.0
| 2,785 |
#!/bin/sh
################################################################################
# This file is part of OpenELEC - http://www.openelec.tv
# Copyright (C) 2009-2014 Stephan Raue ([email protected])
#
# OpenELEC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# OpenELEC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OpenELEC. If not, see <http://www.gnu.org/licenses/>.
################################################################################
if [ -f /sys/class/rtc/rtc0/wakealarm ]; then
logger -t setwakeup.sh "### Setting system wakeup time ###"
echo 0 > /sys/class/rtc/rtc0/wakealarm
echo $1 > /sys/class/rtc/rtc0/wakealarm
logger -t setwakeup.sh "### $(cat /proc/driver/rtc) ###"
fi
|
shinose/qplaybox
|
packages/mediacenter/kodi/scripts/setwakeup.sh
|
Shell
|
gpl-2.0
| 1,189 |
#!/bin/sh
wan_mode=$1
wan_iface=$2
if [ "$CONFIG_FEATURE_LED" = "1" ]; then
if [ "$wan_mode" = "1" -o "$wan_mode" = "2" ]; then
echo netdev > /sys/class/leds/internet_led/trigger
echo "$wan_iface" > /sys/class/leds/internet_led/device_name
echo "link tx rx" > /sys/class/leds/internet_led/mode
echo 1 > /sys/class/leds/internet_led/brightness
[ -f /sys/class/leds/internet_led/delay_on ] && echo 125 > /sys/class/leds/internet_led/delay_on
[ -f /sys/class/leds/internet_led/delay_off ] && echo 125 > /sys/class/leds/internet_led/delay_off
[ -f /sys/class/leds/internet_led/timeout ] && echo 500 > /sys/class/leds/internet_led/timeout
else
if [ -n "`/bin/cat /tmp/adsl_status | grep "7"`" ]; then
echo dsl_data > /sys/class/leds/internet_led/trigger
echo 1 > /sys/class/leds/internet_led/brightness
[ -f /sys/class/leds/internet_led/delay_on ] && echo 125 > /sys/class/leds/internet_led/delay_on
[ -f /sys/class/leds/internet_led/delay_off ] && echo 125 > /sys/class/leds/internet_led/delay_off
[ -f /sys/class/leds/internet_led/timeout ] && echo 500 > /sys/class/leds/internet_led/timeout
fi
fi
fi
|
kbridgers/VOLTE4GFAX
|
package/feeds/ltq_feeds_netcomp_cpe/ifx_config_common_features/files/etc/init.d/internet_led_control.sh
|
Shell
|
gpl-2.0
| 1,228 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#run in src directory
cd "$DIR"
#MIXXX_CONTROLLER_INSTALL_DIR="/usr/share/mixxx/controllers"
MIXXX_CONTROLLER_INSTALL_DIR="${HOME}/download/mixxx-release-2.1.1/res/controllers/"
CONTROLLER_XML_FILENAME="Reloop RMP-3.midi.xml"
CONTROLLER_JS_FILENAME="Reloop-RMP-3-scripts.js"
checkAvail()
{
which "$1" >/dev/null 2>&1
ret=$?
if [ $ret -ne 0 ]
then
echo "tool \"$1\" not found. please install"
exit 1
fi
}
for tool in java javac xmlstarlet mktemp; \
do checkAvail "$tool"; done
if [ ! -r "mixxxml.class" ]
then
echo -e "no mixxxml class found."
echo -e "compiling with 'javac mixxxml.java' ..."
javac mixxxml.java
fi
echo "generating xml and js file for rmp3 controller ..."
tmpfile="`mktemp`"
java mixxxml controller.js | xmlstarlet fo > "$tmpfile"
ret=$?
if [ $ret -ne 0 ]
then
echo "failed."
rm -f "$tmpfile"
exit 1
fi
cat "$tmpfile"
echo ""
echo "success!"
echo ""
echo "copying resulting XML file to ${DIR}/../controllers/${CONTROLLER_XML_FILENAME}"
echo "copying controller.js to ${DIR}/../controllers/$CONTROLLER_JS_FILENAME"
#echo "enter to continue, ctrl+c to abort"
#read a
mv "$tmpfile" "${DIR}/../controllers/${CONTROLLER_XML_FILENAME}" \
&& cp controller.js "${DIR}/../controllers/$CONTROLLER_JS_FILENAME" \
&& chmod 644 "${DIR}/../controllers/${CONTROLLER_XML_FILENAME}" \
&& chmod 644 "${DIR}/../controllers/${CONTROLLER_JS_FILENAME}"
echo "installing files to $MIXXX_CONTROLLER_INSTALL_DIR"
if [ ! -d "$MIXXX_CONTROLLER_INSTALL_DIR" ]
then
echo -e "install directory not found: $MIXXX_CONTROLLER_INSTALL_DIR"
echo -e "aborting."
exit 1
fi
cp "${DIR}/../controllers/${CONTROLLER_XML_FILENAME}" "$MIXXX_CONTROLLER_INSTALL_DIR" \
&& cp "${DIR}/../controllers/${CONTROLLER_JS_FILENAME}" "$MIXXX_CONTROLLER_INSTALL_DIR"
ret=$?
echo "done"
exit $ret
#echo "install to ${MIXXX_CONTROLLER_INSTALL_DIR}?"
#read a
#sudo -k
#sudo cp "${DIR}/../controllers/${CONTROLLER_XML_FILENAME}" "${MIXXX_CONTROLLER_INSTALL_DIR}" \
#&& sudo cp "${DIR}/../controllers/${CONTROLLER_JS_FILENAME}" "${MIXXX_CONTROLLER_INSTALL_DIR}" \
#&& sudo chmod 644 "${MIXXX_CONTROLLER_INSTALL_DIR}/${CONTROLLER_XML_FILENAME}" \
#&& sudo chmod 644 "${MIXXX_CONTROLLER_INSTALL_DIR}/${CONTROLLER_JS_FILENAME}"
#EOF
|
7890/rmp3
|
src/dist.sh
|
Shell
|
gpl-2.0
| 2,291 |
#!/bin/sh
sudo pip3.5 install psutil
echo "Installed python35u-psutil sucessfully if not error..."
|
paramecio/pastafari
|
scripts/standard/centos7/install_psutil.sh
|
Shell
|
gpl-2.0
| 102 |
#!/bin/sh
#
# Testing script for IFJ06 - parser only, tests III.
#
file=./tests/sa2/illegalexpressions.input
valid_result=./tests/sa2/illegalexpressions.result
output_file=tmp
parser=./tests/parser
test_num=1
num_passed=0
num_failed=0
echo ""
echo "------------------------------------------------"
echo "Parser - tests III - nelegalni vyrazy - enjoy ;)"
echo "------------------------------------------------"
echo ""
for expression in `cat $file`;
do
echo "void main(); void main() var int inttmp, double doubletmp, string stringtmp; {$expression}" | $parser > $output_file 2>&1
result=`diff $valid_result $output_file`
if [ "$result" = "" ]; then
num_passed=$(($num_passed + 1))
else
echo "Failed $test_num"
num_failed=$(($num_failed + 1))
fi
rm $output_file
test_num=$(($test_num + 1))
done
# Summary
echo ""
echo "------------------------------------------------"
echo "Summary"
echo "------------------------------------------------"
echo ""
echo "Passed: " $num_passed
echo "Failed: " $num_failed
echo ""
|
s3rvac/fit-projects
|
IFJ/tests/sa2/semantic-test3.sh
|
Shell
|
gpl-2.0
| 1,057 |
#!/bin/bash
MNTPOINT='/home/rh/d'
BACKUPSUBDIR='Backups'
CURRENTSUBDIR='current'
PREVIOUSSUBDIR='previous'
LOGDIR='/home/rh/d/Backups/log'
FILEPREFIX='Backup'
BACKUPS='/'
TARSNAPFILE='/home/rh/d/Backups/backup.snar'
EXCLUDEFILE='/home/rh/d/Backups/exclude_list.txt'
DATESTR=$(date +%F_%H-%M)
cnt=0
for f in ${MNTPOINT}/${BACKUPSUBDIR}/${CURRENTSUBDIR}/*
do
echo "Processing $f file..."
#SUBSTRING=`echo $f| cut -d'_' -f 4`
#echo $SUBSTRING
let "cnt += 1"
done
echo $cnt
if [ ! -d "${MNTPOINT}" ]
then
echo "Destination ${MNTPOINT} is not a directory!\n"
exit 1
fi
if [ ! -f "${EXCLUDEFILE}" ] || [ ! -r "${EXCLUDEFILE}" ]
then
echo "Exclude file ${EXCLUDEFILE} does not exist!\n"
exit $(fusermount -u ${MNTPOINT})+1
fi
if [ ! -d "${MNTPOINT}/${BACKUPSUBDIR}/${CURRENTSUBDIR}" ]
then
echo "Backup directory ${MNTPOINT}/${BACKUPSUBDIR}/${CURRENTSUBDIR} does not exist!\n"
exit $(fusermount -u ${MNTPOINT})+1
fi
if [ ! -d "${LOGDIR}" ]
then
echo "Log directory ${LOGDIR} does not exist!\n"
exit $(fusermount -u ${MNTPOINT})+1
fi
tar --create \
--preserve-permissions \
--verbose \
--gzip \
--one-file-system \
--listed-incremental=${TARSNAPFILE} \
--exclude-from=${EXCLUDEFILE} \
--file=${MNTPOINT}/${BACKUPSUBDIR}/${CURRENTSUBDIR}/${FILEPREFIX}_current_part_${cnt}.tar.gz \
${BACKUPS} \
1> ${LOGDIR}/${FILEPREFIX}_${DATESTR}_part${cnt}.log \
2> ${LOGDIR}/${FILEPREFIX}_${DATESTR}_part${cnt}.err
|
rahul003/useful_scripts
|
backup/incr_backup.sh
|
Shell
|
gpl-2.0
| 1,482 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR"
rm *.o
g++ -O3 -fno-inline -g semaphore.cc shared.cc signals.cc thread.cc --std=c++17 -O0 -g -z execstack -fno-stack-protector -c
ld -r *.o -o threading.obj
rm *.o
mv threading.obj threading.o
|
Qwaz/solved-hacking-problem
|
GoogleCTF/2020 Quals/threading/prob/threading/build.sh
|
Shell
|
gpl-2.0
| 289 |
#!/usr/bin/env bash
bash_dir="$HOME/dev/scripts/bash"
py_dir="$HOME/dev/scripts/bash"
experiments_dir="$HOME/dev/experiments"
template_dir="$HOME/dev/scripts"
template_sh="$template_dir/template.sh"
template_cpp="$template_dir/template.cpp"
template_h="$template_dir/template.h"
template_rs="$template_dir/template.rs"
dots_dir="$HOME/dev/misc/dots"
|
ltlollo/misc
|
scripts/import_template.sh
|
Shell
|
gpl-2.0
| 354 |
#!/bin/bash
cd /opt/development/hbs/clustermapping/data/processing
time node --expose_gc --max-old-space-size=8192 import.js "$1" "$2"
cd -
|
clustermapping/cmp
|
data/processing/scripts/run.sh
|
Shell
|
gpl-2.0
| 140 |
#!/bin/bash
# Run something
master=$1
PRIMARY_HOST=$2
PRIMARY_PORT=$3
if [ "${master}" == true ] ; then
mongod --master
else
mongod --slave --source ${PRIMARY_HOST}:${PRIMARY_PORT}
fi
|
hdzierz/Kaka
|
setup.sh
|
Shell
|
gpl-2.0
| 196 |
version=\
(
'1.22.1'
)
url=\
(
"http://busybox.net/downloads/busybox-$version.tar.bz2"
)
md5=\
(
'337d1a15ab1cb1d4ed423168b1eb7d7e'
)
post_unpack()
{
patches=$(ls "$pkg_dir"/patches/*.patch)
if [ -n "$patches" ]; then
cat $patches | patch -p1
fi
}
configure()
{
cp "$pkg_dir"/config .config &&
yes '' | $cmd_make CROSS_COMPILE="$cfg_target_canonical"- oldconfig
}
build()
{
$cmd_make CROSS_COMPILE="$cfg_target_canonical"-
}
target_install()
{
$cmd_make CROSS_COMPILE="$cfg_target_canonical"- CONFIG_PREFIX=$cfg_dir_rootfs install &&
tar -C "$pkg_dir/fs" --exclude .svn -c -f - . | tar -C "$cfg_dir_rootfs" -x -v -f -
}
|
krisklau/glued
|
rules/busybox/default.bash
|
Shell
|
gpl-2.0
| 679 |
addon_main() {
loop_all_plugins plugin_pre_vdr_start
}
|
lucianm/gentoo-vdr-scripts
|
usr/share/vdr/rcscript/pre-start-95-plugins.sh
|
Shell
|
gpl-2.0
| 57 |
# If upgrading from before 2.4.0
if [[ $1 < '2.4.0' ]]
then
/sbin/restorecon -i -R /etc/httpd/conf.d/pulp.conf
/sbin/restorecon -i -R /etc/pulp
/sbin/restorecon -i -R /etc/pki/pulp
/sbin/restorecon -i -R /srv/pulp
/sbin/restorecon -i -R /usr/bin/pulp-admin
/sbin/restorecon -i -R /usr/bin/pulp-consumer
/sbin/restorecon -i -R /var/lib/pulp
/sbin/restorecon -i -R /var/log/pulp
fi
# If upgrading from before 2.5.0
if [[ $1 < '2.5.0' ]]
then
# Relabel the celery binary
/sbin/restorecon -i -R /usr/bin/celery
fi
# If upgrading from before 2.7.0
if [[ $1 < '2.7.0' ]]
then
/sbin/restorecon -i -R /var/cache/pulp
fi
|
mhrivnak/pulp
|
server/selinux/server/relabel.sh
|
Shell
|
gpl-2.0
| 656 |
#!/bin/bash
livecd-iso-to-disk --format --reset-mbr --ks install.ks --label CentOS7XSCE "$@"
|
tim-moody/xsce
|
installer/netinstall/CentOS-7/alter.sh
|
Shell
|
gpl-2.0
| 93 |
#!/bin/bash
PATH=$PATH:..:../../deps/shunit2-2.1.6/src
export PATH
function _setUp()
{
echo "PATH=$PATH"
echo "PGHOME=$PGHOME"
echo "PGDATA=$PGDATA"
ps auxx > setUp.log
}
function testProcStat001()
{
OUT=${_SHUNIT_TEST_}.out
pt-proc-stat --help > $OUT
cat<<EOF >${_SHUNIT_TEST_}.expected
Usage: pt-proc-stat [option...] [delay [count]]
Options:
-D, --pgdata=DATADIR Location of the database storage area
-P, --pid=PID Process ID of the postmaster
--help Print this help.
EOF
diff -rc ${_SHUNIT_TEST_}.expected $OUT
assertEquals 0 $?
}
function testProcStat002()
{
OUT=${_SHUNIT_TEST_}.out
sudo env PATH=$PATH pt-proc-stat > $OUT
grep 'postmaster\|postgres' ${_SHUNIT_TEST_}.out > /dev/null
assertEquals 0 $?
grep stats.collector $OUT > /dev/null
assertEquals 0 $?
}
function testProcStat003()
{
OUT=${_SHUNIT_TEST_}.out
sudo env PATH=$PATH pt-proc-stat -D $PGDATA > $OUT
grep 'postmaster\|postgres' $OUT > /dev/null
assertEquals 0 $?
grep stats.collector ${_SHUNIT_TEST_}.out > /dev/null
assertEquals 0 $?
}
function testProcStat004()
{
OUT=${_SHUNIT_TEST_}.out
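# pick the PID of the oldest matching postmaster/postgres process (the postmaster itself)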
PID=`ps auxx | grep bin/post | grep -v grep | sort | head -1 | awk '{ print $2 }'`
sudo env PATH=$PATH pt-proc-stat -P $PID > $OUT
grep 'postmaster\|postgres' $OUT > /dev/null
assertEquals 0 $?
grep stats.collector ${_SHUNIT_TEST_}.out > /dev/null
assertEquals 0 $?
sudo env PATH=$PATH pt-proc-stat --pid $PID > $OUT
grep 'postmaster\|postgres' $OUT > /dev/null
assertEquals 0 $?
grep stats.collector ${_SHUNIT_TEST_}.out > /dev/null
assertEquals 0 $?
}
function testProcStat005()
{
OUT=${_SHUNIT_TEST_}.out
sudo env PATH=$PATH pt-proc-stat -D nosuchdir > $OUT
assertEquals 1 $?
}
. shunit2
|
uptimejp/postgres-toolkit
|
tests_cli/test-pt-proc-stat.sh
|
Shell
|
gpl-2.0
| 1,876 |
make_exclude_list_mydroid()
{
local i exclude_components exclude_list
exclude_components='clang/darwin-x86* misc/darwin-x86* gcc/darwin-x86*
gcc/linux-x86/mips gcc/linux-x86/x86 gcc/linux-x86/aarch64
tools/darwin-x86* python/darwin-x86* sdk/tools/darwin-x86*
misc/android-mips* android-emulator/darwin-x86_64 sdk/tools/darwin
'
exclude_list='--exclude out/*'
for i in $exclude_components
do
exclude_list="$exclude_list --exclude prebuilts/$i"
done
echo "$exclude_list"
}
make_exclude_list_kernel()
{
:
}
pack_common() {
local i tmpDir excludeList srcDir baseName mainName
srcDir="$1"
shift 1
if [ -z "$srcDir" ]; then
echo "srcDir is not set" >&2
exit 1
fi
if [ ! -d "$srcDir" ]; then
echo "$srcDir is not a directory" >&2
exit 1
fi
baseName=$(basename "$srcDir")_$(date +%Y%m%d)
mainName=$baseName
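# if an archive for today's date already exists, find the first free numeric suffix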
if [ -r "${baseName}.7z" ]; then
for((i=1; ; i++)) {
mainName="${baseName}_$(printf "%04u" $i)"
if [ ! -r "${mainName}.7z" ]; then
break
fi
}
fi
tmpDir=~/~tmp-$mainName
if [ "$1" != all ]; then
case "$srcDir" in
$MYDROID) excludeList=$(make_exclude_list_mydroid);;
$KERNELDIR) excludeList=$(make_exclude_list_kernel);;
esac
fi
echo "rsync -a $excludeList $srcDir $tmpDir"
rsync -a $excludeList $srcDir $tmpDir
if [ $? -eq 0 ]; then
7za a -mmt=12 "${mainName}.7z" $tmpDir
fi
rm -rf $tmpDir/
}
pack_mydroid()
{
pack_common "$MYDROID" "$1"
}
pack_mydroid
|
panruochen/dragoneye
|
rarely-used/pack-it.bash
|
Shell
|
gpl-2.0
| 1,437 |
## var
## Check if is a number.
##
## Params:
## var: Variable to check if is a number.
##
## Return: 0 if variable is a number, 1 if variable is not a number.
local var="$1"
if [[ "${var}" =~ ^[0-9]+$ ]]; then
return 0
fi
return 1
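# Example usage (a sketch, assuming this snippet is sourced as the body of a
# function named is_number):
#   is_number "42" && echo "numeric"        # returns 0
#   is_number "4a2" || echo "not a number"  # returns 1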
|
reduardo7/hsabx
|
src/utils/is-number.sh
|
Shell
|
gpl-2.0
| 239 |
#!/bin/sh
# Environment variables for the Qt package.
#
# It's best to use the generic directory to avoid
# compiling in a version-containing path:
if [ -d /usr/lib/qt ]; then
QT4DIR=/usr/lib/qt
else
# Find the newest Qt directory and set $QT4DIR to that:
for qtd in /usr/lib/qt-* ; do
if [ -d $qtd ]; then
QT4DIR=$qtd
fi
done
fi
if [ ! "$CPLUS_INCLUDE_PATH" = "" ]; then
CPLUS_INCLUDE_PATH=$QT4DIR/include:$CPLUS_INCLUDE_PATH
else
CPLUS_INCLUDE_PATH=$QT4DIR/include
fi
PATH="$PATH:$QT4DIR/bin"
export QT4DIR
export CPLUS_INCLUDE_PATH
|
Ponce/current-source
|
l/qt/profile.d/qt4.sh
|
Shell
|
gpl-2.0
| 561 |
#!/bin/sh
#
# mkfs_test -o <outdir> -d <device>
#
usage() {
echo "usage: ${MKFS_TEST} -o <outdir> -d <device> -m <mountpoint>"
echo " -o output directory for the logs"
echo " -d device"
echo " -m mountpoint"
exit 1
}
verify_sizes() {
if [ "$#" -lt "4" ] ; then
echo "verify_size(): blocksize clustersize volsize out" |tee -a ${LOGFILE}
exit 1
fi
B=$1
C=$2
V=$3
O=$4
RET=0
${DEBUGFS} -R "stats" ${device} >> ${O} 2>/dev/null
num1=`${AWK} '/Block Size Bits/ {print $4;}' ${O}`
num2=`${AWK} '/Cluster Size Bits/ {print $8;}' ${O}`
num3=`${AWK} '/Clusters:/ {print $4;}' ${O}`
if [ ${num1} -eq 0 ] || [ ${num2} -eq 0 ] || [ ${num3} -eq 0 ]
then
echo "error: device not formatted" |tee -a ${LOGFILE}
exit 1
fi
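# recover the byte sizes from the debugfs stats: 2^(bits-9) sectors of 512 bytes,
# i.e. blocksize = 2^num1 bytes, clustersize = 2^num2 bytes, volume size in blocks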
b=$[$[2**$[${num1} - 9]]*512]
c=$[$[2**$[${num2} - 9]]*512]
v=$[${num3} * ${c}/${b}]
echo -n "verify ..... " |tee -a ${LOGFILE}
if [ ${B} -ne ${b} ]; then
echo "ERROR: Blocksize mismatch - found ${b}, expected ${B}" >> ${O}
RET=1
fi
if [ ${C} -ne ${c} ]; then
echo "ERROR: Clustersize mismatch - found ${c}, expected ${C}" >> ${O}
RET=1
fi
if [ ${V} -ne ${v} ]; then
echo "ERROR: Volumesize mismatch - found ${v}, expected ${V}" >> ${O}
RET=1
fi
echo "" >> ${O}
if [ ${RET} -ne 0 ]; then
echo "FAILED. Errors in ${O}" |tee -a ${LOGFILE}
else
echo "OK" |tee -a ${LOGFILE}
fi
return ${RET}
}
get_partsz() {
dev=`echo ${device} | sed 's/\/dev\///'`
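# look up the device's size (in 1K blocks) in /proc/partitions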
num=`cat /proc/partitions | ${AWK} -v DEV=${dev} '
BEGIN{dev=DEV} // {split($0, a); if (a[4] == dev) {printf("%u\n", $3); exit 0;} }'`
if [ ${num} -eq 0 ]; then
echo "error: unable to find size of device" |tee -a ${LOGFILE}
exit 1
fi
partsz=$[${num}*1024]
return 0
}
do_fsck() {
if [ "$#" -lt "1" ]; then
echo "do_fsck(): <out>" |tee -a ${LOGFILE}
exit 1
fi
out=$1
echo ${FSCK} -fn ${device} >>${LOGFILE}
echo -n "fsck ..... " |tee -a ${LOGFILE}
echo ${FSCK} -fn ${device} >>${out}
${FSCK} -fn ${device} >>${out} 2>&1
grep "All passes succeeded" ${LOGFILE} >/dev/null 2>&1
grep "All passes succeeded" ${out} >/dev/null 2>&1
if [ $? -ne 0 ] ; then
echo "FAILED. Errors in ${out}" |tee -a ${LOGFILE}
exit 1
else
echo "OK" |tee -a ${LOGFILE}
fi
echo "" >> ${out}
return 0
}
do_mkfs() {
if [ "$#" -lt "5" ] ; then
echo "do_mkfs(): blocksize clustersize device volsize out" |tee -a ${LOGFILE}
exit 1
fi
B=$1
C=$2
D=$3
V=$4
O=$5
echo ${MKFS} -b ${B} -C ${C} ${D} ${V} >> ${LOGFILE}
echo -n "mkfs ..... " |tee -a ${LOGFILE}
echo ${MKFS} -b ${B} -C ${C} ${D} ${V} >> ${O}
${MKFS} -x -F -b ${B} -C ${C} -N 1 -J size=4M ${D} ${V} >> ${O} 2>&1
echo "OK" |tee -a ${LOGFILE}
echo "" >> ${O}
}
do_mount() {
# mount the device on mntdir
echo -n "mount ${device} ${mntdir} " |tee -a ${LOGFILE}
${MOUNT} -t ocfs2 ${device} ${mntdir} 2>/dev/null
if [ $? -ne 0 ]
then
echo -n "FAILED. Check dmesg for errors." 2>&1 |tee -a ${LOGFILE}
exit 1
else
echo "OK" |tee -a ${LOGFILE}
fi
}
do_umount() {
# umount the volume
echo -n "umount ${mntdir} " |tee -a ${LOGFILE}
${UMOUNT} ${mntdir} 2>/dev/null
if [ $? -ne 0 ]
then
echo "FAILED. Check dmesg for errors." 2>&1 |tee -a ${LOGFILE}
exit 1
else
echo "OK" |tee -a ${LOGFILE}
fi
}
do_consume() {
file_type=$1
# create 1M sized files
fillbsz=1048576
# find the free space
freespace=`df --block-size=${fillbsz} ${device} |
awk -v DEV=${device} 'BEGIN {dev=DEV;} // { if ($1 == dev) print $4; }'`
if [ $file_type -eq 0 ]
then
echo -n "create ${freespace} files " |tee -a ${LOGFILE}
else
freespace=$[${freespace}/2]
echo -n "create 2 large files " |tee -a ${LOGFILE}
fi
j=0
for((i=0;i<$freespace;i++))
do
if [ $[$i % 11] -eq 0 ]
then
if [ $j -eq 10 ]
then
echo -ne "\b\b\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b\b\b" |tee -a ${LOGFILE}
j=0
else
echo "." |tee -a ${LOGFILE}
j=$[$j+1]
fi
fi
if [ $file_type -eq 0 ]
then
dd if=/dev/zero of=${usedir}/file$i bs=${fillbsz} count=1 >/dev/null 2>&1
else
dd if=/dev/zero of=${usedir}/test_file1 bs=${fillbsz} count=1 seek=$i >/dev/null 2>&1
dd if=/dev/zero of=${usedir}/test_file2 bs=${fillbsz} count=1 seek=$i >/dev/null 2>&1
fi
if [ $? -ne 0 ]
then
i=0
echo
break;
fi
done
if [ $i -ne 0 ] ; then echo ; fi
return 0
}
do_consume_and_delete() {
file_type=$1
do_mount
# add files to fill up (not necessarily full)
usedir=${mntdir}/`${DATE} +%Y%m%d_%H%M%S`
echo "create testdir ${usedir}" |tee -a ${LOGFILE}
${MKDIR} -p ${usedir}
do_consume $file_type
do_umount
do_fsck ${OUT}
#delete all the files.
do_mount
${RM} -rf ${usedir}
echo "delete all test files" |tee -a ${LOGFILE}
do_umount
do_fsck ${OUT}
}
do_bitmap_test() {
# do_consume_and_delete will consume the disks and free them to see whether
# it is OK. 0 is to create many small files and 1 is used to creat 2 very
# large files.
do_consume_and_delete 0
do_consume_and_delete 1
}
MKFS="`which sudo` -u root `which mkfs.ocfs2`"
FSCK="`which sudo` -u root `which fsck.ocfs2`"
DEBUGFS="`which sudo` -u root `which debugfs.ocfs2`"
MOUNT="`which sudo` -u root `which mount.ocfs2`"
UMOUNT="`which sudo` -u root `which umount`"
MKDIR="`which sudo` -u root `which mkdir`"
RM="`which sudo` -u root `which rm`"
GREP=`which grep`
DATE=`which date`
AWK=`which awk`
MKFS_TEST=`basename $0`
bindir=`basename ${0}`
outdir=`basename ${bindir}`
device=
mntdir=
OPTIND=1
while getopts "d:i:o:m:c" args
do
case "$args" in
o) outdir="$OPTARG";;
d) device="$OPTARG";;
m) mntdir="$OPTARG";;
esac
done
LOGFILE=${outdir}/mkfs-test.log
if [ -f ${LOGFILE} ]; then
mv ${LOGFILE} `dirname ${LOGFILE}`/`date +%F-%H-%M-%S`-`basename ${LOGFILE}`
fi;
if [ -z "${outdir}" ]; then
echo "invalid output directory: ${outdir}"
usage ;
fi
if [ ! -b "${device}" ]; then
echo "invalid device: ${device}" |tee -a ${LOGFILE}
usage ;
fi
if [ -z "${mntdir}" ]; then
echo "invalid mount point: ${mntdir}" |tee -a ${LOGFILE}
usage ;
fi
echo "create logdir ${outdir}" |tee -a ${LOGFILE}
mkdir -p ${outdir}
#get partition size
partsz=0
get_partsz
numblks=1048576
testnum=1
### Test all combinations of blocksizes and clustersizes
for blks in 512 1024 2048 4096
do
for clusts in 4096 8192 16384 32768 65536 131072 262144 524288 1048576
do
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
echo "Test ${testnum}: -b ${blks} -C ${clusts}" |tee -a ${LOGFILE}
do_mkfs ${blks} ${clusts} ${device} ${numblks} ${OUT}
verify_sizes ${blks} ${clusts} ${numblks} ${OUT}
do_fsck ${OUT}
testnum=$[$testnum+1]
done
done
### Test option '-T mail'
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
echo "Test ${testnum}: -T mail" |tee -a ${LOGFILE}
echo -n "mkfs ..... " |tee -a ${LOGFILE}
${MKFS} -x -F -b 4K -C 4K -N 2 -T mail ${device} 262144 >${OUT} 2>&1
echo "OK" |tee -a ${LOGFILE}
echo -n "verify ..... " |tee -a ${LOGFILE}
${DEBUGFS} -R "ls -l //" ${device} >>${OUT} 2>&1
num=`${AWK} '/journal:0000/ {print $6;}' ${OUT}`
if [ $num -ne 134217728 ]; then
echo "ERROR: Journal size too small for type mail" >> ${OUT}
echo "" >> ${OUT}
echo "FAILED. Errors in ${OUT}" |tee -a ${LOGFILE}
else
echo "OK" |tee -a ${LOGFILE}
fi
do_fsck ${OUT}
testnum=$[$testnum+1]
### Test option '-T datafiles'
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
echo "Test ${testnum}: -T datafiles" |tee -a ${LOGFILE}
echo -n "mkfs ..... " |tee -a ${LOGFILE}
${MKFS} -x -F -b 4K -C 4K -N 2 -T datafiles ${device} 262144 >${OUT} 2>&1
echo "OK" |tee -a ${LOGFILE}
echo -n "verify ..... " |tee -a ${LOGFILE}
${DEBUGFS} -R "ls -l //" ${device} >>${OUT} 2>&1
num=`${AWK} '/journal:0000/ {print $6;}' ${OUT}`
if [ $num -ne 33554432 ]; then
echo "ERROR: Journal size too small for type datafiles" >> ${OUT}
echo "" >> ${OUT}
echo "FAILED. Errors in ${OUT}" |tee -a ${LOGFILE}
else
echo "OK" |tee -a ${LOGFILE}
fi
do_fsck ${OUT}
testnum=$[$testnum+1]
### Test option '-J size=64M'
### Test option '-J size=256M'
for jrnlsz in 64 256
do
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
echo "Test ${testnum}: -J size=${jrnlsz}M" |tee -a ${LOGFILE}
echo -n "mkfs ..... " |tee -a ${LOGFILE}
${MKFS} -x -F -b 4K -C 4K -N 2 -J size=${jrnlsz}M ${device} 262144 >${OUT} 2>&1
echo "OK" |tee -a ${LOGFILE}
echo -n "verify ..... " |tee -a ${LOGFILE}
${DEBUGFS} -R "ls -l //" ${device} >>${OUT} 2>&1
num=`${AWK} '/journal:0000/ {print $6;}' ${OUT}`
inbytes=$[$jrnlsz*1024*1024]
if [ $num -ne ${inbytes} ]; then
echo "ERROR: Journal size expected ${inbytes} but found ${num}" >> ${OUT}
echo "" >> ${OUT}
echo "FAILED. Errors in ${OUT}" |tee -a ${LOGFILE}
else
echo "OK" |tee -a ${LOGFILE}
fi
do_fsck ${OUT}
testnum=$[$testnum+1]
done
### Test option '-N 4'
### Test option '-N 32'
for slots in 4 32
do
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
echo "Test ${testnum}: -N ${slots}" |tee -a ${LOGFILE}
echo -n "mkfs ..... " |tee -a ${LOGFILE}
${MKFS} -x -F -b 4K -C 4K -N ${slots} -J size=4M ${device} 262144 >${OUT} 2>&1
echo "OK" |tee -a ${LOGFILE}
echo -n "verify ..... " |tee -a ${LOGFILE}
${DEBUGFS} -R "stats" ${device} >>${OUT} 2>&1
num=`${AWK} '/Max Node Slots:/ {print $4;}' ${OUT}`
if [ $num -ne ${slots} ]; then
echo "ERROR: Node slots expected ${slots} but found ${num}" >> ${OUT}
echo "" >> ${OUT}
echo "FAILED. Errors in ${OUT}" |tee -a ${LOGFILE}
else
echo "OK" |tee -a ${LOGFILE}
fi
do_fsck ${OUT}
testnum=$[$testnum+1]
done
### Test option '-L mylabel'
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
echo "Test ${testnum}: -L mylabel" |tee -a ${LOGFILE}
label="my_label_is_very_very_very_long_to_the_point_of_being_useless"
echo -n "mkfs ..... " |tee -a ${LOGFILE}
${MKFS} -x -F -b 4K -C 4K -N 1 -L ${label} ${device} 262144 >${OUT} 2>&1
echo "OK" |tee -a ${LOGFILE}
echo -n "verify ..... " |tee -a ${LOGFILE}
${DEBUGFS} -R "stats" ${device} >${OUT} 2>&1
dsklab=`${AWK} '/Label:/ {print $2;}' ${OUT}`
if [ ${label} != ${dsklab} ]; then
echo "ERROR: Label found \"${dsklab}\" expected \"${label}\"" >> ${OUT}
echo "" >> ${OUT}
echo "FAILED. Errors in ${OUT}" |tee -a ${LOGFILE}
else
echo "OK" |tee -a ${LOGFILE}
fi
do_fsck ${OUT}
testnum=$[$testnum+1]
### Test option '--fs-features=inline-data'
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
echo "Test ${testnum}: --fs-features=inline-data" |tee -a ${LOGFILE}
label="Oracle_Home"
echo -n "mkfs ..... " |tee -a ${LOGFILE}
${MKFS} --fs-features=inline-data -x -F -b 4K -C 4K -N 2 -L ${label} ${device} 262144 >>${OUT} 2>&1
echo "OK" |tee -a ${LOGFILE}
echo -n "verify ..... " |tee -a ${LOGFILE}
${DEBUGFS} -R "stats" ${device} >>${OUT} 2>&1
${DEBUGFS} -R "stats" ${device}|${GREP} -i "Feature Incompat"|${GREP} -q "inline-data"
RC=$?
if [ "${RC}" != "0" ]; then
echo "ERROR: Did not find InlineData Flag on superblock " >> ${OUT}
echo "" >> ${OUT}
echo "FAILED. Errors in ${OUT}" |tee -a ${LOGFILE}
else
echo "OK" |tee -a ${LOGFILE}
fi
do_fsck ${OUT}
testnum=$[$testnum+1]
### Test default support for sparse file'
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
echo "Test ${testnum}: Default option for sparse file support" |tee -a ${LOGFILE}
label="Oracle_Home"
echo -n "mkfs ..... " |tee -a ${LOGFILE}
${MKFS} -x -F -b 4K -C 4K -N 2 -L ${label} ${device} 262144 >>${OUT} 2>&1
echo "OK" |tee -a ${LOGFILE}
echo -n "verify ..... " |tee -a ${LOGFILE}
${DEBUGFS} -R "stats" ${device} >>${OUT} 2>&1
${DEBUGFS} -R "stats" ${device}|${GREP} -i "Feature Incompat"|${GREP} -q "sparse"
RC=$?
if [ "${RC}" != "0" ]; then
echo "ERROR: Did not find Sparse Flag on superblock " >> ${OUT}
echo "" >> ${OUT}
echo "FAILED. Errors in ${OUT}" |tee -a ${LOGFILE}
else
echo "OK" |tee -a ${LOGFILE}
fi
do_fsck ${OUT}
testnum=$[$testnum+1]
### Test bitmap_cpg change
TAG=mkfs_test_${testnum}
OUT=${outdir}/${TAG}.log
if [ -f ${OUT} ]; then
rm -f ${OUT};
fi;
blocksz=4096
clustsz=1048576
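# one group descriptor block holds (blocksize - 64) bytes of cluster bitmap,
# i.e. (blocksize - 64) * 8 clusters per group; size the volume at half a group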
group_bitmap_size=$[$[${blocksz}-64]*8]
blkcount=$[${group_bitmap_size}/2*${clustsz}/${blocksz}]
total_block=$[${partsz}/${blocksz}]
if [ $blkcount -gt $total_block ];
then
blkcount=$total_block
fi
echo "Test ${testnum}: bitmap_cpg change" |tee -a ${LOGFILE}
echo -n "mkfs ..... " |tee -a ${LOGFILE}
${MKFS} -x -F -b ${blocksz} -C ${clustsz} -N 2 ${device} ${blkcount} >${OUT} 2>&1
echo "OK" |tee -a ${LOGFILE}
#consume the whole volume and then delete all the files.
do_bitmap_test
testnum=$[$testnum+1]
### Test --no-backup-super option
### Test option '-M local'
|
pibroch/ocfs2-test
|
programs/mkfs-tests/mkfs-test.sh
|
Shell
|
gpl-2.0
| 13,403 |
#! /bin/sh
#
# USB Controller Gadget Daemon (sprinkler.sh)
# Copyright (C) 2012 Mickey Malone <mickey.malone at gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# > update-rc.d sprinkler defaults 60
#
sprinkler=${SPRINKLER_BIN:-/sbin/sprinklerdaemon}
config=${SPRINKLER_CONF:-/etc/sprinkler.conf}
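# find the PID of a running sprinklerdaemon, if any (grep -v grep drops the grep process itself)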
pid=$(ps -ef | grep sprinklerdaemon | grep -v grep | awk '{print $2}')
test -x "$sprinkler" || exit 0
case "$1" in
start)
if [ "X$pid" != "X" ]
then
echo "already running [$pid]"
exit 1
fi
echo -n "Starting sprinklerdaemon"
$sprinkler -d -f 1 -c $config
echo "."
;;
stop)
echo -n "Stopping sprinklerdaemon"
kill -s TERM $pid
echo "."
;;
status)
if [ "X$pid" != "X" ]
then
echo "Sprinkler daemon is running as process id [$pid]"
else
echo "Sprinkler daemon is not running"
fi
;;
refresh)
echo "Sending HUP to sprinkler daemon"
kill -s HUP $pid
;;
*)
echo "Usage: /etc/init.d/sprinklerdaemon {start|stop|restart}"
exit 1
esac
exit 0
|
evinrude/controller-gadget-kmod
|
daemon/sprinkler.sh
|
Shell
|
gpl-2.0
| 1,630 |
#!/usr/bin/env bash
sudo dpkg --add-architecture i386 &&
sudo apt-get update &&
sudo apt-get install libc6:i386
|
NoviceLive/unish
|
bin/multiarch.sh
|
Shell
|
gpl-3.0
| 122 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Debug_Linux
CND_DISTDIR=dist
CND_BUILDDIR=build
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libskyfire.a
OUTPUT_BASENAME=libskyfire.a
PACKAGE_TOP_DIR=skyfire/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/skyfire/lib"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/skyfire.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/skyfire.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
daneren2005/Skyfire
|
nbproject/Package-Debug_Linux.bash
|
Shell
|
gpl-3.0
| 1,443 |
################################################################################
# PATHS
################################################################################
ngsphyPATH="$HOME/git/ngsphy/"
CURRENT_DIR="$(pwd)"
CASE_NAME="test2"
MYRANDOMNUM=50426717
GATK="$HOME/apps/gatk/3.8-0-ge9d806836/GenomeAnalysisTK.jar"
PICARD="$HOME/apps/picard/picard.jar"
referenceFile="$CURRENT_DIR/${CASE_NAME}/reference/reference.fasta"
coverages=( "2x" "10x" "20x" "100x" "200x")
################################################################################
# Data organization
################################################################################
echo "Creating test folder"
mkdir -p ${CURRENT_DIR}/${CASE_NAME}/files/ \
${CURRENT_DIR}/${CASE_NAME}/output/ ${CURRENT_DIR}/${CASE_NAME}/src/ \
$CURRENT_DIR/${CASE_NAME}/reference $CURRENT_DIR/${CASE_NAME}/img
echo "Gathering all the data in a single folder"
cp ${ngsphyPATH}/data/settings/ngsphy.settings.supp.test2.2x.txt ${CURRENT_DIR}/${CASE_NAME}/files/
cp ${ngsphyPATH}/data/settings/ngsphy.settings.supp.test2.10x.txt ${CURRENT_DIR}/${CASE_NAME}/files/
cp ${ngsphyPATH}/data/settings/ngsphy.settings.supp.test2.20x.txt ${CURRENT_DIR}/${CASE_NAME}/files/
cp ${ngsphyPATH}/data/settings/ngsphy.settings.supp.test2.100x.txt ${CURRENT_DIR}/${CASE_NAME}/files/
cp ${ngsphyPATH}/data/settings/ngsphy.settings.supp.test2.200x.txt ${CURRENT_DIR}/${CASE_NAME}/files/
cp ${ngsphyPATH}/data/settings/ngsphy.settings.supp.test2.200x.rc.txt ${CURRENT_DIR}/${CASE_NAME}/files/
cp ${ngsphyPATH}/data/indelible/control.supp.test2.txt ${CURRENT_DIR}/${CASE_NAME}/files/
cp ${ngsphyPATH}/data/trees/supp.test2.tree ${CURRENT_DIR}/${CASE_NAME}/files/
cp ${ngsphyPATH}/data/reference_alleles/my_reference_allele_file.test2.txt ${CURRENT_DIR}/${CASE_NAME}/files/
echo "Moving to the working directory"
cd ${CURRENT_DIR}/${CASE_NAME}
################################################################################
# 1. NGSphy read counts - For true Variants
################################################################################
ngsphy -s files/ngsphy.settings.supp.test2.200x.rc.txt
################################################################################
# 2. Reference selection
################################################################################
cat ${CURRENT_DIR}/${CASE_NAME}/NGSphy_test2_200x_RC/alignments/1/ngsphydata_1_TRUE.fasta | grep -a1 "1_0_0" | tail -2 | tr -d " " > ${CURRENT_DIR}/${CASE_NAME}/reference/reference.fasta
cp ${CURRENT_DIR}/${CASE_NAME}/NGSphy_test2_200x_RC/reads/no_error/REPLICATE_1/ngsphydata_1_1_NOERROR.vcf ${CURRENT_DIR}/${CASE_NAME}/files/true.vcf
################################################################################
# 3. Running NGSphy
################################################################################
echo "Running NGSphy - 100 replicates - Coverage 2x"
for replicate in $(seq 1 100); do { time ngsphy -s ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.settings.supp.test2.2x.txt &> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.2x.output; } 2>> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.2x.timings; done
echo "Running NGSphy - 100 replicates - Coverage 10x"
for replicate in $(seq 1 100); do { time ngsphy -s ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.settings.supp.test2.10x.txt &> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.10x.output; } 2>> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.10x.timings; done
echo "Running NGSphy - 100 replicates - Coverage 20x"
for replicate in $(seq 1 100); do { time ngsphy -s ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.settings.supp.test2.20x.txt &> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.20x.output; } 2>> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.20x.timings; done
echo "Running NGSphy - 100 replicates - Coverage 100x"
for replicate in $(seq 1 100); do { time ngsphy -s ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.settings.supp.test2.100x.txt &> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.100x.output; } 2>> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.100x.timings; done
echo "Running NGSphy - 100 replicates - Coverage 200x"
for replicate in $(seq 1 100); do { time ngsphy -s ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.settings.supp.test2.200x.txt &> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.200x.output; } 2>> ${CURRENT_DIR}/${CASE_NAME}/files/ngsphy.200x.timings; done
################################################################################
# 4. Indexing reference
################################################################################
bwa index $referenceFile
samtools faidx $referenceFile
java -jar -Xmx4G $PICARD CreateSequenceDictionary REFERENCE=$referenceFile OUTPUT="$CURRENT_DIR/${CASE_NAME}/reference/reference.dict"
################################################################################
# 5. Mapping
################################################################################
# Organizational purposes
for coverageLevel in ${coverages[*]}; do
mkdir -p $CURRENT_DIR/${CASE_NAME}/mappings/$coverageLevel
done
for ngsphyoutput in $(find ${CURRENT_DIR}/${CASE_NAME}/output -mindepth 1 -maxdepth 1 -type d); do
coverageFolder=$(basename ${ngsphyoutput})
for ngsphyreplicate in $(ls ${ngsphyoutput}| sort); do
numInds=$(cat ${ngsphyoutput}/${ngsphyreplicate}/ind_labels/${SIMPHY_PROJECT_NAME}.1.individuals.csv | wc -l)
let numInds=numInds-2 # This file has a header
mkdir -p "$CURRENT_DIR/${CASE_NAME}/mappings/${coverageFolder}/${ngsphyreplicate}/"
for ind in $(seq 0 $numInds); do
echo "$ngsphyreplicate/$ind"
infile="${ngsphyoutput}/$ngsphyreplicate/reads/REPLICATE_1/LOCUS_1/${SIMPHY_PROJECT_NAME}_1_1_data_${ind}_"
outfile="$CURRENT_DIR/${CASE_NAME}/mappings/${coverageFolder}/${ngsphyreplicate}/${ngsphyreplicate}_${ind}.sam"
RGID="${ngsphyreplicate}-I${ind}"
machine="HiSeq2500"
echo "bwa mem -M -t 4 -R \"@RG\tID:${RGID}\tSM:${RGID}\tPL:Illumina\tLB:${RGID}\tPU:${machine}\" ${referenceFile} ${infile}R1.fq ${infile}R2.fq > $outfile" >> ${CURRENT_DIR}/${CASE_NAME}/src/mappings.sh
done
done
done
bash $CURRENT_DIR/${CASE_NAME}/src/mappings.sh
################################################################################
# 6. Sorting + bamming
################################################################################
for samFile in $(find ${CURRENT_DIR}/${CASE_NAME}/mappings -type f | grep sam$); do
echo $samFile
outputDIR=$(dirname $samFile)
outputFILE="$(basename $samFile .sam).sorted.bam"
echo "samtools view -bSh $samFile | samtools sort - -f $outputDIR/${outputFILE} -@ 4" >> $CURRENT_DIR/${CASE_NAME}/src/bamming.sh
echo "samtools index $outputDIR/$outputFILE" >> $CURRENT_DIR/${CASE_NAME}/src/bamming.sh
echo "rm $samFile" >> $CURRENT_DIR/${CASE_NAME}/src/bamming.sh
done
bash $CURRENT_DIR/${CASE_NAME}/src/bamming.sh
################################################################################
# 7. Mark Duplicates
################################################################################
summaryFile="$CURRENT_DIR/${CASE_NAME}/files/duplicates.summary.txt"
for bamFile in $(find ${CURRENT_DIR}/${CASE_NAME}/mappings -type f | grep sorted.bam$); do
coverageFolder=$(basename $(dirname $(dirname $bamFile)))
outputDIR=$(dirname $bamFile)
values=($(basename $bamFile | tr "_" " " | tr "." " " ))
indID=${values[-4]}
repID=1
if [[ ${#values[@]} -eq 6 ]]; then
repID=${values[-3]}
fi
dedupOutput="$outputDIR/$(basename $bamFile .sorted.bam).dedup.bam"
metricsOutput="$outputDIR/$(basename $bamFile .sorted.bam).metrics.txt"
histogramOutput="$outputDIR/$(basename $bamFile .sorted.bam).histogram.txt"
echo "picard MarkDuplicates I=$bamFile O=$dedupOutput M=$metricsOutput"
java -jar -Xmx4G $HOME/apps/picard/picard.jar MarkDuplicates INPUT=$bamFile OUTPUT=$dedupOutput METRICS_FILE=$metricsOutput
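# line 7 of the Picard metrics file is the column header, line 8 the data row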
header=$(head -7 $metricsOutput | tail -n+7)
summaryInfo=$(head -8 $metricsOutput | tail -n+8)
if [[ ! -f $summaryFile ]]; then
echo -e "COVERAGE\tREPLICATE\tINDIVIDUAL_ID\tNUM_MAPPED_READS_SAMTOOLS\tNUM_RECORDS_SAMTOOLS\tNUM_N_READS\t$header" > $summaryFile
fi
numReads=$(samtools view -c $bamFile)
numRecords=$(samtools view $bamFile | wc -l)
numMappedReads=$(samtools view -F 0x4 $bamFile | cut -f 1 | sort | uniq | wc -l)
echo -e "$coverageFolder\t$repID\t$indID\t$numMappedReads\t$numRecords\t$numReads\t$summaryInfo" >> $summaryFile
tail -n+11 $metricsOutput > $histogramOutput
samtools index $dedupOutput
done
################################################################################
# 8. INDEL REALIGNMENT
################################################################################
for bamFile in $(find ${CURRENT_DIR}/${CASE_NAME}/mappings -type f | grep dedup.bam$); do
echo "$bamFile"
outputDIR=$(dirname $bamFile)
mkdir -p $outputDIR/target/
targetOutput="$outputDIR/target/$(basename $bamFile .dedup.bam).target.intervals.list"
realignedBam="$outputDIR/$(basename $bamFile .dedup.bam).realigned.bam"
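# two-pass GATK realignment: first collect candidate intervals, then realign reads over them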
java -jar -Xmx4g $GATK \
-T RealignerTargetCreator \
-R $referenceFile \
-I $bamFile \
-o $targetOutput
java -jar -Xmx4g $GATK \
-T IndelRealigner \
-R $referenceFile \
-I $bamFile \
-targetIntervals $targetOutput \
-o $realignedBam
samtools index $realignedBam
done
################################################################################
# 9. GATK - single call joint genotyping
################################################################################
for bamFile in $(find ${CURRENT_DIR}/${CASE_NAME}/mappings -type f | grep realigned.bam$); do
echo "$bamFile"
outputDIR=$(dirname $bamFile)
mkdir -p $outputDIR/vcf-singlevc-joint-gt/
OUTPUTVCF="$outputDIR/vcf-singlevc-joint-gt/$(basename $bamFile .realigned.bam).g.vcf"
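# per-sample gVCF calling; joint genotyping happens in the GenotypeGVCFs step below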
{ time java -jar -Xmx4g $GATK \
-T HaplotypeCaller \
-R $referenceFile \
-I $bamFile \
-ERC GVCF \
-o $OUTPUTVCF; } 2>> ${CURRENT_DIR}/${CASE_NAME}/files/time.gatk.HaplotypeCaller.g.vcf.txt
done
for coverageLevel in ${coverages[*]}; do
coverageFolder="${CURRENT_DIR}/${CASE_NAME}/mappings/$coverageLevel"
for replicate in $(ls $coverageFolder); do
individuals=""
replicateFolder="${CURRENT_DIR}/${CASE_NAME}/mappings/$coverageLevel/$replicate"
for indFile in $(find ${CURRENT_DIR}/${CASE_NAME}/mappings/$coverageLevel/$replicate -type f | grep .g.vcf$); do
individuals+=" -V $indFile"
done
OUTPUTVCF="$replicateFolder/vcf-singlevc-joint-gt/$replicate.vcf"
{ time java -jar -Xmx4g $GATK \
-T GenotypeGVCFs \
-R $referenceFile \
$individuals \
-o $OUTPUTVCF ;} 2>> ${CURRENT_DIR}/${CASE_NAME}/files/time.gatk.genotypeGVCF.txt
done
done
################################################################################
# 10 - Count discovered variants
################################################################################
mkdir ${CURRENT_DIR}/${CASE_NAME}/varsites/
numVariantsSummary="${CURRENT_DIR}/${CASE_NAME}/files/numvariants.summary.txt"
echo -e "COVERAGE\tREPLICATE\tNUM_VARIANTS" > ${CURRENT_DIR}/${CASE_NAME}/files/numvariants.summary.txt
for coverageLevel in ${coverages[*]}; do
for vcffile in $(find ${CURRENT_DIR}/${CASE_NAME}/mappings/$coverageLevel -name "*.vcf"| grep -v g.vcf | grep vcf-singlevc-joint-gt); do
base=$(basename $vcffile)
repID=$(echo $base |tr "_" " " | tr "." " " | awk '{print $4}' )
if [[ repID -eq "vcf" ]]; then
repID=1
fi
numVariants=$(cat $vcffile | grep -v "^#" |wc -l)
mkdir -p ${CURRENT_DIR}/${CASE_NAME}/varsites/$coverageLevel/
cat $vcffile | grep -v "^#" | awk '{print $2}' > ${CURRENT_DIR}/${CASE_NAME}/varsites/$coverageLevel/${base}.varsites
echo -e "$coverageLevel\t$repID\t$numVariants" >> $numVariantsSummary
done
done
################################################################################
# 11. get information per coverage on the varibale sites
################################################################################
for coverageLevel in ${coverages[*]}; do
find ${CURRENT_DIR}/${CASE_NAME}/varsites/$coverageLevel -name "*.varsites" > ${CURRENT_DIR}/${CASE_NAME}/files/varsites.$coverageLevel.files
done
|
merlyescalona/ngsphy
|
manuscript/supp.material/scripts/supp.test2.sh
|
Shell
|
gpl-3.0
| 12,554 |
cd src
zip -r ../ContactLost.love *
|
WilliamBundy/contact-lost-ld26
|
package.sh
|
Shell
|
gpl-3.0
| 36 |
#!/bin/sh
set -e
aclocal
autoconf
libtoolize -q --force --copy
automake --add-missing --copy
echo You may now run ./configure
|
tavianator/dimension
|
autogen.sh
|
Shell
|
gpl-3.0
| 128 |
#!/bin/bash
# Usage:
# bash scripts/train.sh GPU_ID NET DATA CONFIG
#
# Example:
# bash scripts/train.sh 0 inception_v3 cancer_not_annotated cfg2
# bash scripts/train.sh 0 vgg16 cancer_not_annotated cfg1
set -x
set -e
export PYTHONUNBUFFERED="True"
GPU_ID=$1
NET=$2
DATA=$3
CONFIG=$4
LOG="logs/cancer_diagnosis_${NET}_${DATA}_`date +'%Y_%m_%d_%H_%M_%S'`.txt"
exec &> >(tee -a "$LOG")
echo "Logging output to ${LOG}"
export CUDA_VISIBLE_DEVICES=${GPU_ID}
nohup time python lib/train.py --net ${NET} --data ${DATA} --cfg ${CONFIG} &
|
DuJiajun1994/CancerDiagnosis
|
scripts/train.sh
|
Shell
|
gpl-3.0
| 536 |
#!/bin/bash
python AnalyzeSimulation.py --paralog1 YLR333C --paralog2 YGR027C --simnum 85 > YLR333C_YGR027C_MG94_nonclock_Sim85_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Simulation/ShFiles/MG94_YLR333C_YGR027C_sim85.sh
|
Shell
|
gpl-3.0
| 145 |
#!/bin/bash
# -*- coding: utf-8 -*-
# author: maxime déraspe
# email: [email protected]
# download files from web site
function download_files() {
release=$(date +%Y-%m-%d)
out_dir=""
if [ -z $1 ]
then
outdir=cog_$release
else
outdir=$1/cog_$release
fi
mkdir -p $outdir && cd $outdir
echo "Downloading COG $release [$outdir].."
wget ftp://ftp.ncbi.nih.gov/pub/COG/COG2014/data/prot2003-2014.fa.gz
wget ftp://ftp.ncbi.nih.gov/pub/COG/COG2014/data/cog2003-2014.csv
wget ftp://ftp.ncbi.nih.gov/pub/COG/COG2014/data/prot2003-2014.gi2gbk.tab
wget ftp://ftp.ncbi.nih.gov/pub/COG/COG2014/data/cognames2003-2014.tab
wget ftp://ftp.ncbi.nih.gov/pub/COG/COG2014/data/fun2003-2014.tab
}
# gunzip files
function organize_files() {
gunzip *.gz
}
download_files $1
organize_files
|
zorino/bacterialDB-fetcher
|
db-scripts/cog.sh
|
Shell
|
gpl-3.0
| 857 |
#!/usr/bin/env bash
# Usage: dump-termsets.sh gapt-testing.jar path/to/TSTP/Solutions path/to/output/termsets/
set -e
gapt_testing_jar=`readlink -f $1`
tstp_solutions_dir=`readlink -f $2`
output_dir=$3
mkdir -p $output_dir
cd $output_dir
echo -n >input_proofs
for seq_name in \
LinearExampleProof \
LinearEqExampleProof \
SquareDiagonalExampleProof \
SquareEdgesExampleProof \
SquareEdges2DimExampleProof \
SumExampleProof \
SumOfOnesF2ExampleProof \
SumOfOnesFExampleProof \
SumOfOnesExampleProof \
UniformAssociativity3ExampleProof \
FactorialFunctionEqualityExampleProof \
FactorialFunctionEqualityExampleProof2 \
do
for i in {0..100}; do
echo "$seq_name($i)" "proofseq-${seq_name}-${i}.termset" >>input_proofs
done
done
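# turn each TSTP solution path into an "input-file output-termset-name" pair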
find $tstp_solutions_dir -not -type d -name \*.s | \
perl -ne 'chop;m,/([^/]+)/([^/]+)\.[^./]+\.s$,;print"$_ tstp-$1-$2.termset\n"' \
>>input_proofs
shuf -o input_proofs input_proofs
parallel --timeout 60 \
--colsep ' ' \
--joblog joblog \
--progress --eta --bar \
"echo {1}; java -cp $gapt_testing_jar \
-Xmx1G -Xss40m \
-XX:ParallelGCThreads=1 -XX:ConcGCThreads=1 \
gapt.testing.dumpTermset {1} {2} 2>&1" \
:::: input_proofs >>stdout || true
|
gapt/gapt
|
testing/dump-termsets.sh
|
Shell
|
gpl-3.0
| 1,229 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libsmt-lib.${CND_DLIB_EXT}
OUTPUT_BASENAME=libsmt-lib.${CND_DLIB_EXT}
PACKAGE_TOP_DIR=libsmt-lib.so/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/libsmt-lib.so/lib"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/libsmt-lib.so.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/libsmt-lib.so.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
oRatioSolver/o-ratio
|
smt-lib/nbproject/Package-Release.bash
|
Shell
|
gpl-3.0
| 1,503 |
#!/bin/bash
rm -f storage/smoke/test_large.bin
rm -f storage/smoke/test1.txt
test -d storage/smoke && rmdir storage/smoke
rm -f metadata/8732d71b-077e-49ed-9222-b1177280de1e
rm -f NOTIFICATION
rm -f run-job.log
rm -f scheduler.log
rm -f storage.log
rm -f metaman.log
|
xuanhan863/bakapy
|
acceptance/test_run_job_smoke/clean.sh
|
Shell
|
gpl-3.0
| 268 |
#!/bin/sh
# File: test-mtn2svn-dir-rename-delete1.sh
# needs: test-mtn2svn.include
#
# Test converting revisions from Monotone to Subversion and back to Monotone again.
# Special: Remove directory after moving files from there.
# Tailer needs to rename file first, than remove directory!
# A more complexe testing script is 'test-mtn2svn-dir-rename-delete2.sh'
#
# ERROR 1: Fixed by 'monotone-dir-move-and-del.patch'
# ERROR 2: Needs to be fix in the svn parser
# ERROR 1:
# --------
# 2007-06-16 14:46:06 CRITICAL: Cannot rename 'testdir/rootdir/svnside/file' back to 'testdir/rootdir/svnside/somedir/file'
# 2007-06-16 14:46:06 ERROR: Failure replaying: Revision: c85d76cc6d99fae438caaa16e4c7f7238a9c17ce
# Date: 2007-06-16 12:46:02+00:00
# Author: key-dummy
# Entries: somedir(DEL at c85d76cc6d99fae438caaa16e4c7f7238a9c17ce), file(REN from somedir/file)
# Log: changes
#
# linearized ancestor: e8713b95b73fc42b353f07454849d4b517104167
# real ancestor(s): e8713b95b73fc42b353f07454849d4b517104167
# Traceback (most recent call last):
# File "tailor-0.9.28-henry/vcpx/target.py", line 117, in replayChangeset
# self._replayChangeset(changeset)
# File "tailor-0.9.28-henry/vcpx/target.py", line 320, in _replayChangeset
# action(group)
# File "tailor-0.9.28-henry/vcpx/target.py", line 477, in _renameEntries
# self._renamePathname(e.old_name, e.name)
# File "tailor-0.9.28-henry/vcpx/repository/svn.py", line 732, in _renamePathname
# rename(newpath, oldpath)
# OSError: [Errno 2] No such file or directory
# >>> output from "mtn diff" >>>
# delete "somedir"
#
# rename "somedir/file"
# to "file"
# <<< end <<<
# ERROR 2:
# --------
# Changelog are different. The "rename somedir/file file" will list as
# "delete somedir/file" and "add file".
# This error comes from svn to monotone (the svn parser).
#
# File state is OK. Only the log is not complete.
. ./test-mtn2svn.include
monotone_setup
# Create dirs, files, 2 revisions
mkdir "somedir"
touch "somedir/file"
mtn_exec add * --recursive
mtn_exec commit --message "initial commit"
# file renames
mtn_exec rename "somedir/file" "file"
# dir deletes
mtn_exec drop "somedir"
mtn_exec commit --message "changes"
testing_runs
|
lelit/tailor
|
test-scripts/test-mtn2svn-dir-rename-delete1.sh
|
Shell
|
gpl-3.0
| 2,223 |
#!/bin/bash
# Demonstrate bash scopes.
INSTRUCTOR='Jerry'
# we have access to the global scope in a function
get_instructor1() {
echo $INSTRUCTOR
}
# **will** modify the value in the global scope
get_instructor2() {
INSTRUCTOR='Buddy' # a distinct value, so the change is visible in the final echo
}
# will NOT modify the value in the global scope
get_instructor3() {
local INSTRUCTOR='Elvis'
}
# will place student IN GLOBAL SCOPE
get_instructor4() {
STUDENT='Jack'
}
get_instructor1
get_instructor2
get_instructor3
get_instructor4
echo Now the value of INSTRUCTOR in the global scope is: $INSTRUCTOR
echo Now the value of STUDENT in the global scope is: $STUDENT
|
qjcg/shell-examples
|
scopes.sh
|
Shell
|
gpl-3.0
| 613 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x wikiflow-sessioncompute_5-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n wikiflow::sessioncompute_5:1.0 -N ID0000008 -R condorpool -L example_workflow -T 2017-01-27T02:51:42+00:00 ./wikiflow-sessioncompute_5-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 2/logs/w-11_2/20170127T025143+0000/00/00/sessioncompute_5_ID0000008.sh
|
Shell
|
gpl-3.0
| 1,228 |
#!/bin/bash
#BSUB -J "snp[1-28]"
#BSUB -q normal
##BSUB -W 4:00
#BSUB -o output.%I
#BSUB -e error.%I
WD=/lustre/scratch116/casm/cgp/users/tn5/julia/fixvsnonfix
cd $WD
BAM=`ls bams/*.bam | head -n $LSB_JOBINDEX | tail -n 1`
SAMPLE=`basename $BAM | sed 's/.bam//'`
/nfs/users/nfs_t/tn5/apps/samtools/samtools view $BAM | wc -l >depth/${SAMPLE}.total
|
TravisCG/SI_scripts
|
total.sh
|
Shell
|
gpl-3.0
| 352 |
#!/bin/bash
# Build script for High Fidelity VR Platform - Debian 10 (Buster / Testing) edition.
# (c) 2017 J.C.L -- aka the virtual persona, Omega Heron.
# GPL 3.0
#
# No support will be given, use at your own risk and read all instructions before using.
#
# If you would like to improve this script then by all means offer a pull request.
# Do not file comments saying you should do this or that, do it yourself and offer a pull request.
# There's no assurance this will work ongoing -- It depends upon High Fidelity not breaking its
# make files (a routine occurrence) and certain packages being at a certain level in Buster. Since
# Buster is a moving target for library versions it may, eventually, become incompatible or need
# some changes to methods here.
#
# This looks for most recent tag and builds or warns and exits if already built.
set -e
export QT_CMAKE_PREFIX_PATH=/usr/lib/x86_64-linux-gnu/cmake
export HFSRC=/home/$USER/src/hifi
export HFBINBASE=/home/$USER/hifi
export CXX='g++ -std=c++11'
if [ ! -e $HFSRC ]; then
mkdir -p $HFSRC
git clone http://github.com/highfidelity/hifi $HFSRC
fi
cd $HFSRC
git checkout master
git pull
export RELEASE_NUMBER=$(/usr/bin/git describe --tags $(git rev-list --tags --max-count=1) | sed 's/RELEASE-//g')
# Set destination to install base + tag version
export HFBIN=$HFBINBASE/$RELEASE_NUMBER
# Check to see if it's already made...
if [ -e $HFBIN ]; then
read -p "Already built. Press R or any other key to exit." -n 1 -r
if [[ ! $REPLY =~ ^[Rr]$ ]]; then
printf "\nAborted build.\n\n"
exit 1
fi
fi
# Away we go.
git checkout tags/RELEASE-$RELEASE_NUMBER
export RELEASE_TYPE=PRODUCTION
export BRANCH=stable
# Never ever ever ever start with a build dir containing any remnants of previous builds - only sorrow results if you do.
rm -rf $HFSRC/build
mkdir -p $HFSRC/build
cd $HFSRC/build
cmake .. -DUSE_LOCAL_TBB=1 -DUSE_LOCAL_SDL2=1 -DUSE_LOCAL_GLM=1 -DCMAKE_BUILD_TYPE=Release
# Note: -j2 could be much higher, like -j8 if you want to max out compile speed on an i7 or even higher if you have more "cores"
make -j2 domain-server assignment-client pcmCodec hifiCodec interface
# Insure nothing from a previous compile exists in destination dir
rm -rf $HFBIN
# Create destination dir and required plugins dir under destination
mkdir -p $HFBIN/plugins
# Now the chrpath and patchelf hackery begins to clean up lib references and "install" binaries/resources
cd $HFSRC/build/assignment-client
strip -s assignment-client
chrpath -r $HFBIN assignment-client
mv assignment-client $HFBIN
cd $HFSRC/build/assignment-client/plugins
strip libhifiCodec.so
mv libhifiCodec.so $HFBIN/plugins
strip libpcmCodec.so
mv libpcmCodec.so $HFBIN/plugins
cd $HFSRC/build/domain-server
strip -s domain-server
mv domain-server $HFBIN
cp -r $HFSRC/domain-server/resources/ $HFBIN
cd $HFSRC/build/ext/makefiles/quazip/project/lib
strip libquazip5.so.1.0.0
mv libquazip5.so $HFBIN
mv libquazip5.so.1 $HFBIN
mv libquazip5.so.1.0.0 $HFBIN
cd $HFSRC/build/ext/makefiles/bullet/project/lib
strip libBulletCollision.so.2.83
strip libBulletDynamics.so.2.83
strip libBulletSoftBody.so.2.83
strip libLinearMath.so.2.83
patchelf --set-rpath $HFBIN libBulletCollision.so.2.83
patchelf --set-rpath $HFBIN libBulletDynamics.so.2.83
patchelf --set-rpath $HFBIN libBulletSoftBody.so.2.83
mv libBulletCollision.so.2.83 $HFBIN
mv libBulletDynamics.so.2.83 $HFBIN
mv libBulletSoftBody.so.2.83 $HFBIN
mv libLinearMath.so.2.83 $HFBIN
mv libBulletCollision.so $HFBIN
mv libBulletDynamics.so $HFBIN
mv libBulletSoftBody.so $HFBIN
mv libLinearMath.so $HFBIN
cd $HFSRC/build/ext/makefiles/nvtt/project/lib
strip libnvcore.so
strip libnvimage.so
strip libnvmath.so
strip libnvtt.so
patchelf --set-rpath $HFBIN libnvimage.so
patchelf --set-rpath $HFBIN libnvtt.so
mv libnvcore.so $HFBIN
mv libnvimage.so $HFBIN
mv libnvmath.so $HFBIN
mv libnvtt.so $HFBIN
cd $HFSRC/build/interface
strip -s interface
chrpath -r $HFBIN interface
mv interface $HFBIN
cd $HFSRC/build/interface/plugins
strip libhifiCodec.so
mv libhifiCodec.so $HFBIN/plugins
strip libhifiSdl2.so
chrpath -r $HFBIN libhifiSdl2.so
mv libhifiSdl2.so $HFBIN/plugins
strip libhifiSixense.so
chrpath -r $HFBIN libhifiSixense.so
mv libhifiSixense.so $HFBIN/plugins
strip libpcmCodec.so
mv libpcmCodec.so $HFBIN/plugins
cd $HFSRC/build/interface
cp -r $HFSRC/build/interface/resources/ $HFBIN
cp -r $HFSRC/build/interface/scripts/ $HFBIN
cd $HFSRC/build/ext/makefiles/polyvox/project/lib/Release
mv libPolyVoxCore.so $HFBIN
mv libPolyVoxCore.so.0 $HFBIN
strip libPolyVoxCore.so.0.2.1
mv libPolyVoxCore.so.0.2.1 $HFBIN
cd $HFSRC/build/ext/makefiles/steamworks/project/src/steamworks/redistributable_bin/linux64
strip libsteam_api.so
chmod a+r libsteam_api.so
mv libsteam_api.so $HFBIN
cd $HFSRC/build/ext/makefiles/sixense/project/src/sixense/lib/linux_x64/release
strip libsixense_utils_x64.so
strip libsixense_x64.so
mv libsixense_utils_x64.so $HFBIN
mv libsixense_x64.so $HFBIN
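# Usage sketch (an assumption, not from the upstream docs): the binaries land
# in $HFBINBASE/$RELEASE_NUMBER with their rpath already pointing there, so
# they can be started directly, e.g.:
#
#   cd ~/hifi/<RELEASE_NUMBER>
#   ./domain-server &
#   ./interface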
|
OmegaHeron/hifi-buildscripts
|
buster/buster_buildhf.sh
|
Shell
|
gpl-3.0
| 5,009 |
#!/bin/sh
# script file for running the Calibre2Opds program in CLI mode on Unix-like systems
# such as Linux and Mac.
c2o_jar=OpdsOutput-3.6-SNAPSHOT.jar
# We set Java VM stack limits explicitly here to get consistency across systems
#
# -Xms<value> define starting size
# -Xmx<value> defines maximum size
# -Xss<value> defines stack size
#
# It is possible that for very large libraries this may not be enough - we will have to see.
c2o_opts="-Xms128m -Xmx512m"
old=`pwd`
scriptdir=`dirname "$0"`
# Check we know how to run from where binaries are located
if [ ! -f "$c2o_jar" ]; then
  if [ ! -f "$scriptdir/$c2o_jar" ]; then
    echo "ERROR: calibre2opds binaries not found"
    exit 1
  fi
  cd "$scriptdir"
fi
# The next few lines are to help with running in Portable mode with minimal user setup required
if [ "$CALIBRE2OPDS_CONFIG" = "" ]; then
if [ -d $scriptdir/Calibre2OpdsConfig ]; then
CALIBRE2OPDS_CONFIG=Calibre2OpdsConfig
export CALIBRE2OPDS_CONFIG
fi
fi
echo "Starting calibre2opds"
echo java $c2o_opts $1 -cp $c2o_jar Cli
java $c2o_opts $1 -cp $c2o_jar Cli $*
cd $old
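# Usage sketch: the first argument is forwarded to the JVM, so a larger heap
# can be requested for very large libraries (the value below is an
# illustrative assumption, not a tested recommendation):
#
#   sh run.sh -Xmx1024m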
|
calibre2opds/calibre2opds
|
script/run.sh
|
Shell
|
gpl-3.0
| 1,102 |
#!/bin/sh
#
# Copyright (C) 2014, 2015 Internet Systems Consortium, Inc. ("ISC")
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
# $Id: tests.sh,v 1.22 2012/02/09 23:47:18 tbox Exp $
SYSTEMTESTTOP=..
. $SYSTEMTESTTOP/conf.sh
status=0
n=0
getsit() {
awk '$2 == "SIT:" {
print $3;
}' < $1
}
fullsit() {
awk 'BEGIN { n = 0 }
// { v[n++] = length(); }
END { print (v[1] == v[2]); }'
}
havetc() {
grep 'flags:.* tc[^;]*;' $1 > /dev/null
}
for bad in bad*.conf
do
ret=0
echo "I:checking that named-checkconf detects error in $bad"
$CHECKCONF $bad > /dev/null 2>&1
if [ $? != 1 ]; then echo "I:failed"; ret=1; fi
status=`expr $status + $ret`
done
n=`expr $n + 1`
echo "I:checking SIT token returned to empty SIT option ($n)"
ret=0
$DIG +qr +sit version.bind txt ch @10.53.0.1 -p 5300 > dig.out.test$n
grep SIT: dig.out.test$n > /dev/null || ret=1
grep "status: NOERROR" dig.out.test$n > /dev/null || ret=1
if [ $ret != 0 ]; then echo "I:failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo "I:checking response size without SIT ($n)"
ret=0
$DIG large.example txt @10.53.0.1 -p 5300 +ignore > dig.out.test$n
havetc dig.out.test$n || ret=1
if [ $ret != 0 ]; then echo "I:failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo "I:checking response size without valid SIT ($n)"
ret=0
$DIG +sit large.example txt @10.53.0.1 -p 5300 +ignore > dig.out.test$n
havetc dig.out.test$n || ret=1
grep "; SIT:.*(good)" dig.out.test$n > /dev/null || ret=1
if [ $ret != 0 ]; then echo "I:failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo "I:checking response size with SIT ($n)"
ret=0
$DIG +sit large.example txt @10.53.0.1 -p 5300 > dig.out.test$n.l
sit=`getsit dig.out.test$n.l`
$DIG +qr +sit=$sit large.example txt @10.53.0.1 -p 5300 +ignore > dig.out.test$n
havetc dig.out.test$n && ret=1
grep "; SIT:.*(good)" dig.out.test$n > /dev/null || ret=1
if [ $ret != 0 ]; then echo "I:failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo "I:checking response size with SIT recursive ($n)"
ret=0
$DIG +qr +sit=$sit large.xxx txt @10.53.0.1 -p 5300 +ignore > dig.out.test$n
havetc dig.out.test$n && ret=1
grep "; SIT:.*(good)" dig.out.test$n > /dev/null || ret=1
if [ $ret != 0 ]; then echo "I:failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo "I:checking SIT is learnt for TCP retry ($n)"
ret=0
$DIG +qr +sit large.example txt @10.53.0.1 -p 5300 > dig.out.test$n
linecount=`getsit dig.out.test$n | wc -l`
if [ $linecount != 3 ]; then ret=1; fi
checkfull=`getsit dig.out.test$n | fullsit`
if [ $checkfull != 1 ]; then ret=1; fi
if [ $ret != 0 ]; then echo "I:failed"; fi
status=`expr $status + $ret`
n=`expr $n + 1`
echo "I:checking for SIT value in adb ($n)"
ret=0
$RNDC -c ../common/rndc.conf -s 10.53.0.1 -p 9953 dumpdb
sleep 1
grep "10.53.0.2.*\[sit=" ns1/named_dump.db > /dev/null|| ret=1
if [ $ret != 0 ]; then echo "I:failed"; fi
status=`expr $status + $ret`
echo "I:exit status: $status"
exit $status
|
krichter722/bind9
|
bin/tests/system/sit/tests.sh
|
Shell
|
gpl-3.0
| 3,663 |
#!/bin/bash
# Get the script's source directory (BASH_SOURCE is a bashism, so use bash
# rather than plain sh)
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
JAVA=`which java 2>/dev/null`
$JAVA -Xmx2G -jar "${DIR}/vlo-statistics-${project.version}.jar" "$@"
|
acdh-oeaw/vlo-curation
|
vlo-statistics/src/bin/start.sh
|
Shell
|
gpl-3.0
| 215 |
#!/bin/bash -x
echo "CREATE TABLE gi_taxid_prot (gi integer PRIMARY KEY, tax_id integer);" | sqlite3 vhunter.db
echo -e '.separator "\t"\n.import /scratch/dwlab/databases/taxdump_20160802/gi_taxid_prot.dmp gi_taxid_prot\n'| sqlite3 vhunter.db
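# Hedged follow-up sketch: the table maps a protein GI to its taxonomy id, so a
# quick sanity check after the import is a single lookup (the GI value 12345 is
# a made-up placeholder):
echo "SELECT tax_id FROM gi_taxid_prot WHERE gi = 12345;" | sqlite3 vhunter.db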
|
guoyanzhao/VirusSeeker-Virome
|
build_db_gi_taxid_prot.sh
|
Shell
|
gpl-3.0
| 244 |
export GOPATH=$ZSH_CACHE/go
export GO111MODULE=on
export GOPROXY=https://goproxy.io,direct
export GOSUMDB=gosum.io+ce6e7565+AY5qEHUk/qmHc5btzW45JVoENfazw8LielDsaI+lEbq6
# export GOPRIVATE=*.corp.example.com
# godoc color
export GODOCC_STYLE='native'
export PATH=$GOPATH/bin:$PATH
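# Optional sanity check: `go env` prints the effective values, so the proxy and
# sumdb settings above can be verified with:
#
#   go env GOPROXY GOSUMDB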
|
wow4me/.zshrc.d
|
plugins/go.plugin.zsh
|
Shell
|
gpl-3.0
| 282 |
#!/usr/bin/env bash
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
_startdir=$(pwd)
shopt -s extglob
shopt -s nullglob
for _package in !(*usrbinmove)+(.pkg.tar.xz); do
_name=${_package/.pkg.tar.xz}
rm -rf $_name
mkdir $_name
bsdtar -xf $_package -C $_name
cd $_name
for _dir in bin sbin usr/sbin; do
if [ -d $_dir ]; then
mkdir -p usr/bin
mv -n $_dir/* usr/bin
rmdir $_dir || exit
fi
done
bsdtar -cf - .??* * | xz -c -z - > $_startdir/$_name-usrbinmove.pkg.tar.xz
cd $_startdir
rm -r $_name
echo "${_name/-*} done"
done
echo "All done. Now install with pacman -U *usrbinmove*"
|
WorMzy/converttousrbin
|
converttousrbin.sh
|
Shell
|
gpl-3.0
| 1,245 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="lr-yabause"
rp_module_desc="Sega Saturn emu - Yabause (optimised) port for libretro"
rp_module_menus="4+"
rp_module_flags="!armv6"
function sources_lr-yabause() {
gitPullOrClone "$md_build" https://github.com/libretro/yabause.git
}
function build_lr-yabause() {
cd libretro
make clean
if isPlatform "neon"; then
make platform=armvneonhardfloat
else
make
fi
md_ret_require="$md_build/libretro/yabause_libretro.so"
}
function install_lr-yabause() {
md_ret_files=(
'libretro/yabause_libretro.so'
'yabause/AUTHORS'
'yabause/COPYING'
'yabause/ChangeLog'
'yabause/AUTHORS'
'yabause/GOALS'
'yabause/README'
'yabause/README.LIN'
)
}
function configure_lr-yabause() {
mkRomDir "saturn"
ensureSystemretroconfig "saturn"
addSystem 1 "$md_id" "saturn" "$md_inst/yabause_libretro.so"
}
|
MarcLandis/RetroPie-Setup
|
scriptmodules/libretrocores/lr-yabause.sh
|
Shell
|
gpl-3.0
| 1,335 |
docker run --rm --name pg -e POSTGRES_PASSWORD=password -e POSTGRES_USER=oxycoin -e POSTGRES_DB=oxycoin_db_main -p 5432:5432 postgres:9.6-alpine
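# Hedged usage sketch: with the container up, connect using the same
# credentials (requires a local psql client; the connection string mirrors the
# environment variables above):
#   psql postgresql://oxycoin:password@localhost:5432/oxycoin_db_main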
|
Oxycoin/oxy-node
|
.devutils/startdb.sh
|
Shell
|
gpl-3.0
| 146 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x wikiflow-longestsession_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n wikiflow::longestsession_0:1.0 -N ID0000004 -R condorpool -L example_workflow -T 2017-01-21T14:59:36+00:00 ./wikiflow-longestsession_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 2/instances/4_wikiflow_1sh_1s_noannot/dags/ubuntu/pegasus/example_workflow/20170121T145937+0000/00/00/longestsession_0_ID0000004.sh
|
Shell
|
gpl-3.0
| 1,228 |
#!/bin/bash
./run.sh -example.1 greedy
|
losvald/pco
|
data/example1-greedy.sh
|
Shell
|
gpl-3.0
| 39 |
#!/usr/bin/perl
open($file,"<","./test") || die $!;
while (<$file>) {
$_ =~ /(\d+)/;
$number = $1;
$number = qx(doz $number);
chomp($number);
$line = $_;
$line =~ s/\d+/$number/;
print $line;
}
|
dgoodmaniii/dozenal-droid
|
DozcalDroid/jni/calendar/convert.sh
|
Shell
|
gpl-3.0
| 202 |
#!/bin/bash
# This script is used to initiate a multi part upload on AWS Glacier. It
# requires the name of the Glacier vault and the name of the file to be
# uploaded in that vault. It returns the upload id. Upload chunk size is set
# to 8388608 bytes (i.e. 8 MB) as a good compromise between fewer chunks but
# also the ability to easily retry uploading a failed chunk.
if [ "$#" -ne 2 ]
then
echo "Usage $0 <VAULT> <FILE_NAME>"
exit 1
fi
VAULT=$1
FILE=$2
CHUNK_SIZE=8388608
aws glacier initiate-multipart-upload --account-id - --archive-description "$FILE" \
--part-size $CHUNK_SIZE --vault-name "$VAULT"
exit 0
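# Hedged follow-up sketch (not part of this script): once the command above
# returns an uploadId, each 8 MB chunk is sent with upload-multipart-part and
# the archive is finalized with complete-multipart-upload. UPLOAD_ID, the byte
# range, and the chunk file name below are hypothetical placeholders:
#
#   aws glacier upload-multipart-part --account-id - --vault-name "$VAULT" \
#       --upload-id "$UPLOAD_ID" --range 'bytes 0-8388607/*' --body chunk.00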
|
chggr/scripts-config
|
aws/glacier/init-upload.sh
|
Shell
|
gpl-3.0
| 666 |
#!/bin/sh
DIR=`dirname $0`
${DIR}/why3ml -P alt-ergo -a split_goal $1 -o $2
echo "on fait une erreur expres"
exit 2
|
zoggy/genet
|
draft/proval/why3_split.sh
|
Shell
|
gpl-3.0
| 117 |
#!/bin/bash
# author hsing.li
# Process a long log dump: save the output log to a file xx, run this script
# on that file, and the processed data ends up in xx_deal.txt
# If this does not run under Windows, install Git Bash
echo "Start processing the file $1..."
> "$1""_deal.txt" # clean file
# traverse file
while read LINE
do
echo $LINE
# regular='^[0-1]\d-[0-3]\d\s\d{2}:\d{2}:\d{2}.\d{3}\s\S+\s[VDIWEA]\S+:\s$'
regular='^[0-1][0-9]-[0-3][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9][0-9].*[VDIWEA].*: '
echo -e $LINE|sed "s/${regular}//g"|tr "\n" " " >> "$1""_deal.txt"
done < $1
# read
echo ""; echo ""; echo ""
echo "End processing the file $1, the result in ""$1""_deal.txt file."
echo ""; echo ""; echo ""
int=1
while(( $int<2 ))
do
ping www.github.com >/dev/null 2>&1
let "int++"
done
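# Usage sketch (assumption: a captured log saved as android.log):
#
#   ./processinglog.sh android.log
#   # processed output lands in android.log_deal.txt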
|
styletodo/llog
|
processinglog.sh
|
Shell
|
gpl-3.0
| 807 |
#!/usr/bin/env bash
XSOCK=/tmp/.X11-unix
while getopts :e:x: option
do
case "${option}"
in
e) EXTENSION=${OPTARG};;
x) XRESOURCES=${OPTARG};;
*) echo $option;;
esac
done
if [ $OPTIND -eq 1 ]; then echo "No options were passed"; exit 1; fi
xhost +local:;
docker pull jeansen/cdmn_docker 2>/dev/null
[[ $(uname -s) != Linux ]] && display=host.docker.internal:0
docker run \
-e DISPLAY=${display:-$DISPLAY} \
--rm \
--volume=$XSOCK:$XSOCK:rw \
--volume=$EXTENSION:/urxvt/cdmn:ro \
--volume=$XRESOURCES:/Xresources:ro \
jeansen/cdmn_docker
xrdb -load ~/.Xresources
#Change after the following issue is resolved to always use the internal name
#https://github.com/docker/for-linux/issues/264
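# Usage sketch (paths are illustrative assumptions): -e points at the urxvt
# extension directory and -x at an Xresources file, both mounted read-only
# into the container:
#
#   ./run.sh -e ~/.urxvt/ext -x ~/.Xresources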
|
Jeansen/cdmn
|
resources/test/run.sh
|
Shell
|
gpl-3.0
| 714 |
#!/bin/bash
coreSlideshow.sh -s mpv -b rating "$@"
|
QSaman/BashScripts
|
Multimedia/Slideshow/videoplaylist.sh
|
Shell
|
gpl-3.0
| 52 |
#! /usr/bin/env bash
set -euo pipefail
# first makes some assertions about the environment and set some shared
# variables before starting the script.
if ! command -v jq > /dev/null 2>&1; then
echo "This script requires jq to work properly."
exit 1
fi
PRODUCT_NAME="${PRODUCT_NAME:-""}"
if [ -z "$PRODUCT_NAME" ]; then
echo "Missing required product name: ${PRODUCT_NAME}"
exit 1
fi
TARGET_ZIP="${TARGET_ZIP:-""}"
if [ -z "$TARGET_ZIP" ]; then
echo "Missing required target path"
exit 1
fi
# Artifactory configuration
ARTIFACTORY_ENDPOINT="${ARTIFACTORY_ENDPOINT:-"https://artifactory.hashicorp.engineering/artifactory"}"
ARTIFACTORY_INPUT_REPO="${ARTIFACTORY_INPUT_REPO:-"hc-signing-input"}"
ARTIFACTORY_OUTPUT_REPO="${ARTIFACTORY_OUTPUT_REPO:-"hc-signing-output"}"
ARTIFACTORY_TOKEN="${ARTIFACTORY_TOKEN:-""}"
ARTIFACTORY_USER="${ARTIFACTORY_USER:-""}"
if [[ -z "$ARTIFACTORY_TOKEN" || -z "$ARTIFACTORY_USER" ]]; then
echo "Missing required Artifactory credentials"
exit 1
fi
# Create the sign/notarize ID "SN_ID"
if command -v uuidgen > /dev/null 2>&1; then
uuid="$(uuidgen)"
elif [ -f /proc/sys/kernel/random/uuid ]; then
uuid="$(cat /proc/sys/kernel/random/uuid)"
else
echo "This script needs some way to generate a uuid."
exit 1
fi
SN_ID="$uuid"
# CircleCI configuration
CIRCLE_ENDPOINT="${CIRCLE_ENDPOINT:-"https://circleci.com/api/v2"}"
CIRCLE_PROJECT="${CIRCLE_PROJECT:-"project/github/hashicorp/circle-codesign"}"
CIRCLE_TOKEN="${CIRCLE_TOKEN:-""}"
if [ -z "$CIRCLE_TOKEN" ]; then
echo "Missing required CircleCI credentials"
exit 1
fi
# Next, upload an unsigned zip file to the Artifactory at
# https://artifactory.hashicorp.engineering/artifactory/hc-signing-input/{PRODUCT}/{ID}.zip
echo "Uploading unsigned zip to ${ARTIFACTORY_ENDPOINT}/${ARTIFACTORY_INPUT_REPO}/${PRODUCT_NAME}/${SN_ID}.zip"
curl --show-error --silent --fail \
--user "${ARTIFACTORY_USER}:${ARTIFACTORY_TOKEN}" \
--request PUT \
"${ARTIFACTORY_ENDPOINT}/${ARTIFACTORY_INPUT_REPO}/${PRODUCT_NAME}/${SN_ID}.zip" \
--upload-file "$TARGET_ZIP" > /dev/null
# Next, start the CircleCI Pipeline, then wait for a Workflow
# to start.
echo "Executing CircleCI job"
res="$(curl --show-error --silent --fail --user "${CIRCLE_TOKEN}:" \
--request POST \
--header 'Content-Type: application/json' \
--header 'Accept: application/json' \
--data "{ \"branch\": \"master\" ,\"parameters\": { \"PRODUCT\": \"${PRODUCT_NAME}\", \"PKG_NAME\": \"${SN_ID}.zip\" } }" \
"${CIRCLE_ENDPOINT}/${CIRCLE_PROJECT}/pipeline")"
pipeline_id="$(echo "$res" | jq -r '.id')"
echo "CircleCI Pipeline $pipeline_id started"
echo -n "Retrieving CircleCI Workflow ID"
# 12 * 5 seconds = 1 minute
counter=12
workflow_id=""
# wait until a Workflow ID is found
until [ "$workflow_id" != "" ]; do
echo -n "."
workflow_id=$(curl --silent --fail --user "${CIRCLE_TOKEN}:" \
--request GET \
--header 'Accept: application/json' \
"${CIRCLE_ENDPOINT}/pipeline/${pipeline_id}/workflow" \
| jq -r '.items[].id'
)
if [ "$counter" -eq "0" ]; then
echo "Tried too many times, but Pipeline ${pipeline_id} still has no Workflows"
exit 1
fi
counter=$((counter - 1))
sleep 5
done
echo ""
echo "CircleCI Workflow $workflow_id started"
# Next, wait for the Workflow to reach a terminal state, then fails if it isn't
# "success"
echo -n "Waiting for CircleCI Workflow ID: ${workflow_id}"
# 360 * 5 seconds = 30 minutes
counter=360
finished="not_run"
# wait for one of the terminal states: ["success", "failed", "error", "canceled"]
until [[ "$finished" == "success" || "$finished" == "failed" || "$finished" == "error" || "$finished" == "canceled" ]]; do
echo -n "."
finished=$(curl --silent --fail --user "${CIRCLE_TOKEN}:" \
--header 'Accept: application/json' \
"${CIRCLE_ENDPOINT}/workflow/${workflow_id}" \
| jq -r '.status'
)
if [ "$counter" -eq "0" ]; then
echo "Tried too many times, but workflow is still in state ${finished}"
exit 1
fi
counter=$((counter - 1))
sleep 5
done
echo ""
if [ "$finished" != "success" ]; then
echo "Workflow ID ${workflow_id} ${finished}"
exit 1
fi
# Next, download the signed zip from Artifactory at
# https://artifactory.hashicorp.engineering/artifactory/hc-signing-output/{PRODUCT}/{ID}.zip
echo "Retrieving signed zip from ${ARTIFACTORY_ENDPOINT}/${ARTIFACTORY_OUTPUT_REPO}/${PRODUCT_NAME}/${SN_ID}.zip"
curl --show-error --silent --fail --user "${ARTIFACTORY_USER}:${ARTIFACTORY_TOKEN}" \
--request GET \
"${ARTIFACTORY_ENDPOINT}/${ARTIFACTORY_OUTPUT_REPO}/${PRODUCT_NAME}/${SN_ID}.zip" \
--output "signed_${SN_ID}.zip"
signed_checksum=$(
curl --silent --show-error --fail --user "${ARTIFACTORY_USER}:${ARTIFACTORY_TOKEN}" \
--head \
"${ARTIFACTORY_ENDPOINT}/${ARTIFACTORY_OUTPUT_REPO}/${PRODUCT_NAME}/${SN_ID}.zip" \
| grep -i "x-checksum-sha256" | awk 'gsub("[\r\n]", "", $2) {print $2;}'
)
echo "${signed_checksum} signed_${SN_ID}.zip" | gsha256sum -c
mv "signed_${SN_ID}.zip" "$TARGET_ZIP"
|
dave2/packer
|
scripts/codesign_example.sh
|
Shell
|
mpl-2.0
| 5,023 |
#!/bin/bash
echo "Removing upload files..."
rm -rf /tmp/data
echo "Removing leftover leases and persistent rules..."
rm /var/lib/dhcp/*
echo "Making sure Udev doesn't block our network..."
rm /etc/udev/rules.d/70-persistent-net.rules
mkdir /etc/udev/rules.d/70-persistent-net.rules
rm -rf /dev/.udev/
rm /lib/udev/rules.d/75-persistent-net-generator.rules
echo "Adding a 2 sec delay to the interface up, to make the dhclient happy"
echo "pre-up sleep 2" >> /etc/network/interfaces
|
ImpressCMS/packer-impresscms-devbox
|
scripts/cleanup.sh
|
Shell
|
mpl-2.0
| 485 |
#!/usr/bin/bash
# Feed the statements below to the mysql client via a heredoc; as originally
# written, `mysql` was invoked with no input and the SQL lines would have been
# executed by the shell instead.
mysql <<'SQL'
-- tables
USE audit;
source /var/www/webroot/radicore/audit/sql/mysql/audit-schema.sql
USE dict;
source /var/www/webroot/radicore/dict/sql/mysql/dict-schema.sql
USE menu;
source /var/www/webroot/radicore/menu/sql/mysql/menu-schema.sql
USE workflow;
source /var/www/webroot/radicore/workflow/sql/mysql/workflow-schema.sql
USE classroom;
source /var/www/webroot/radicore/classroom/sql/mysql/classroom-schema.sql
USE product;
source /var/www/webroot/radicore/product/sql/mysql/product-schema.sql
USE survey;
source /var/www/webroot/radicore/survey/sql/mysql/survey-schema.sql
USE xample;
source /var/www/webroot/radicore/xample/sql/mysql/xample-schema.sql
-- data
-- No audit-data.sql
USE dict;
source /var/www/webroot/radicore/dict/sql/mysql/dict-data.sql
USE menu;
source /var/www/webroot/radicore/menu/sql/mysql/menu-data.sql
source /var/www/webroot/radicore/menu/sql/mysql/mnu-user.sql
USE workflow;
source /var/www/webroot/radicore/workflow/sql/mysql/workflow-data.sql
USE classroom;
source /var/www/webroot/radicore/classroom/sql/mysql/classroom-data.sql
USE product;
source /var/www/webroot/radicore/product/sql/mysql/product-data.sql
USE survey;
source /var/www/webroot/radicore/survey/sql/mysql/survey-data.sql
USE xample;
source /var/www/webroot/radicore/xample/sql/mysql/xample-data.sql
-- Dict and Menu import of exported data for prototypes
USE dict;
source /var/www/webroot/radicore/classroom/sql/classroom.dict_export.sql
source /var/www/webroot/radicore/product/sql/product.dict_export.sql
source /var/www/webroot/radicore/survey/sql/survey.dict_export.sql
source /var/www/webroot/radicore/xample/sql/xample.dict_export.sql
USE menu;
source /var/www/webroot/radicore/classroom/sql/crss.menu_export.sql
source /var/www/webroot/radicore/product/sql/product.menu_export.sql
source /var/www/webroot/radicore/survey/sql/survey.menu_export.sql
source /var/www/webroot/radicore/xample/sql/xample.menu_export.sql
quit
SQL
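# Usage sketch (an assumption: the mysql client picks up credentials from
# ~/.my.cnf or the default socket, since no -u/-p flags are passed):
#
#   ./create_tables.sh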
|
apmuthu/radicore
|
utils/create_tables.sh
|
Shell
|
agpl-3.0
| 1,956 |
#!/bin/bash
# ./wordListTrans.js step is currently disabled
./makeWordList.sh && ./sortWordList.js && \
( ./transWordPhon.js & ./transWordUnique.js; wait ) && \
./rootWordPhon.js && \
./transAllLists.js
|
elspru/spel
|
src/vocab/gen/genAll.sh
|
Shell
|
agpl-3.0
| 181 |
#!/bin/sh
pip install --upgrade pip
pip install --upgrade pylint
pip install --upgrade -v -r requirements.txt
pip install --upgrade django-authopenid
export SECRET_KEY='coin'
./authentic2-ctl syncdb --noinput --all
./authentic2-ctl migrate --fake
./authentic2-ctl validate
(pylint -f parseable --rcfile /var/lib/jenkins/pylint.django.rc authentic2/ | tee pylint.out) || /bin/true
|
pu239ppy/authentic2
|
jenkins.sh
|
Shell
|
agpl-3.0
| 381 |
#!/bin/bash
shopt -s extglob
FILES_IMPORT_1="PPN-DaKaR-ZS-Reihen_1_Import_table*.csv"
FILE_IMPORT_2="PPN-DaKaR-ZS-Reihen_2_Import_table1.csv"
FILE_IMPORT_3="PPN-DaKaR-ZS-Reihen_3_Import_table1.csv"
TMPDIR="./all_tables"
CONVERT_TO_K10PLUS_PPN_FILE="PPN-DaKaR-ZS-Reihen_1_Import.csv"
# Convert to UTF-8 CSV file ($FILES_IMPORT_1 must stay unquoted so the glob expands)
for file in $FILES_IMPORT_1; do
    sed -e '1,2d' "$file" | awk -F',' \
    '{"kchashmgr get /usr/local/tmp/KCKONKORDANZ.db " $1 | getline new_ppn; \
    printf "%s", new_ppn; for (column = 2; column <=NF; ++column) printf FS$column; print NL}'
done
# Remove ZDB and Zeder column to match our PPN -> Dakar-Abbreviations
# We use gawk since ordinary cut does not cope with comma in quotes
gawk '{print $1, $4}' FPAT="([^,]+)|(\"[^\"]+\")" OFS="," "$FILE_IMPORT_2" | sed -e '1,2d'
gawk '{print $1, $2}' FPAT="([^,]+)|(\"[^\"]+\")" OFS="," "$FILE_IMPORT_3" | sed -e '1,2d' | sed -e 's/\"//g'
|
ubtue/ub_tools
|
cpp/dakar_tools/generate_find_of_discovery_mappings.sh
|
Shell
|
agpl-3.0
| 916 |
#! /bin/sh
# Assumes the script is executed in the /path/to/scripts/data/ directory
# and the sources have been compiled in a bin directory also located
# in the /path/to/scripts/data directory
java -cp bin:"lib/*" org.akvo.gae.remoteapi.RemoteAPI CorrectFolderSurveyPath $1 "$2" "$3" $4
|
akvo/akvo-flow
|
scripts/data/correct_folder_survey_path.sh
|
Shell
|
agpl-3.0
| 290 |
#!/bin/sh
echo ========== Set Up Environment ==========
echo NGINX_HOME is $NGINX_HOME
|
DreamInSun/cyan.img.Nginx
|
shell/ulimit.sh
|
Shell
|
agpl-3.0
| 87 |
#!/bin/bash
set -e
php -r '
$dbhost = @$_ENV["DB_HOST"] ?: "db";
$dbname = @$_ENV["DB_NAME"] ?: "mapas";
$dbuser = @$_ENV["DB_USER"] ?: "mapas";
$dbpass = @$_ENV["DB_PASS"] ?: "mapas";
$pdo = null;
echo "\naguardando o banco de dados subir corretamente...";
while(true){
try {
$pdo = new PDO("pgsql:host={$dbhost};port=5432;dbname={$dbname};user={$dbuser};password={$dbpass}");
echo "\nconectado com sucesso ao banco pgsql:host={$dbhost};port=5432;dbname={$dbname};user={$dbuser};\n";
break;
} catch (Exception $e) {
echo "..";
}
sleep(1);
}
echo "\ncorrigindo status da fila de criação de cache de permissão\n\n";
$pdo->query("UPDATE permission_cache_pending SET status = 0;");
'
if ! cmp /var/www/version.txt /var/www/private-files/deployment-version >/dev/null 2>&1
then
/var/www/scripts/deploy.sh
cp /var/www/version.txt /var/www/private-files/deployment-version
else
/var/www/scripts/db-update.sh
/var/www/scripts/mc-db-updates.sh
fi
chown -R www-data:www-data /var/www/html/assets /var/www/html/files /var/www/private-files
nohup /recreate-pending-pcache-cron.sh &
touch /mapas-ready
exec "$@"
|
hacklabr/mapasculturais
|
compose/entrypoint.sh
|
Shell
|
agpl-3.0
| 1,176 |
#!/bin/sh
resources="icon.bmp picture.xbm sail.bmp sample.bmp sample.wav utf8.txt player.bmp"
for source in *.c
do
# is a test if it's executable (and not testnative cause it isn't ported)
if grep -q main $source && ! echo $source | grep testnative
then
local project=`echo $source | sed -e 's/.c$//'`
tests=`echo $tests $project`
fi
done
echo "Tests supported : $tests"
prepare() {
#patches
for project in $tests
do
source="$project.c"
#patch the source if needed
perl dir_patch.pl $source $resources
echo "Creating project $project"
mkdir -p $project/source $project/include $project/data_bin
cp Makefile.psl1ght $project/Makefile
if grep -q common.h $source
then
echo "\tneeds common.c and icon.bmp"
cp common.h $project/include
cp common.c $project/source
cp icon.bmp $project/data_bin
fi
cp $source $project/source
#copy resources if needed
for res in $resources
do
grep -q $res $source && cp -v $res $project/data_bin
done
done
}
build() {
cwd=`pwd`
for i in $tests
do
if [ -d $i ]
then
echo Building $i
cd $i
make || return 1
cd $cwd
fi
done
}
clean () {
for i in $tests
do
if [ -d $i ]
then
echo Cleaning $i
rm -Rf $i
fi
done
}
case $1 in
build) build ;;
clean) clean ;;
prep) prepare ;;
all) clean && prepare && build ;;
*) echo "usage $0 build|clean|prep|all"
esac
|
cebash/SDL_PSL1GHT
|
test/script.sh
|
Shell
|
lgpl-2.1
| 1,393 |
#!/bin/bash
export SCRIPT_DIR=$(dirname $(readlink -f ${BASH_SOURCE:-$0}))
# Do nothing if there are uncommitted changes
if [ ! -z "$(git status --porcelain)" ]; then
echo Reposity is not clean.
exit 1
fi
cd $SCRIPT_DIR
git rm -r MSYS2Private
git clone https://github.com/eighttails/MSYS2Private.git
rm -rf MSYS2Private/.git*
git add MSYS2Private
git commit -m'MSYS2Privateを更新'
|
eighttails/PC6001VX
|
win32/syncMSYS2Private.sh
|
Shell
|
lgpl-2.1
| 400 |
#! /bin/ksh
# Script : Ca_Va.sh
# Description : Says it is doing fine
# TRIGGER ID : va
#############################################################
# VARIABLES
#############################################################
#############################################################
# BROADCASTING THE RESPONSE
#############################################################
# Voice (the spoken reply stays in French: "I'm fine, and you?")
say -v "${VOX}" "ça va, et toi ?"
echo "RETURN:ça va, et toi ?:$(date +%H:%M)" >> ${HISTORIQUE_FILE}
# Close the terminal
#osascript -e 'tell application "Terminal" to quit' &
|
kimous/Jarvis
|
plugins/Ca_Va.sh
|
Shell
|
lgpl-3.0
| 564 |
#!/bin/bash
#image name
IMAGE_NAME="knetminer-pig-dev"
# current species directory (the last path component of $PWD).
SPECIES_DIR=`pwd | rev | cut -d '/' -f1 | rev`
#copy .dockerignore-template and .gitignore settings.
cd ../..
cp .dockerignore-template .dockerignore
cat .gitignore >> .dockerignore
# add all species/ sub-folders to .dockerignore except KnetMiner/pom.xml, common/ and species/$SPECIES_DIR.
cd species/
ls -1 | grep -v $SPECIES_DIR | sed 's/^/species\//' | grep -v pom.xml | grep -v common >> .dockerignore
cd ..
#pass docker image build parameters
docker image build \
--build-arg tax_id=9823 \
--build-arg species_name="Sus scrofa" \
--build-arg species_link_name=pig \
--build-arg keywords="pig, s.scrofa, knetminer, quickstart, demo" \
--build-arg description="Pig Knetminer" \
--build-arg reference_genome=true \
--build-arg species_dir="species/$SPECIES_DIR" \
--build-arg oxl_file="PigKNET.oxl" \
--build-arg knetminer_port=8080 \
--squash -t $IMAGE_NAME \
-f common/quickstart/Dockerfile-dev .
#when done, remove .dockerignore and notify with instructions.
if [ $? -eq 0 ]; then
rm .dockerignore
cd species/$SPECIES_DIR
echo "You can run this Docker using: docker run -p8080:8080 -it --rm $IMAGE_NAME"
echo "Then access it at http://localhost:8080/client/"
echo "Note: port 8080 is the Tomcat default; replace with the knetminer_port defined in this file"
fi
|
KeywanHP/KnetMiner
|
species/pig/build-docker-dev.sh
|
Shell
|
lgpl-3.0
| 1,397 |
#!/bin/bash
############################################
### This is provided as an example.
### To properly copy your resolv.conf before
### and after connecting to the VPN, we run
### the docker container in the foreground.
###
### If you prefer to put the container in the
### background with "docker run -d", then
### you'll need a way to manage your host's resolv.conf
### or use other means.
cat /etc/resolv.conf > ./resolv-conf.host
if [ -h /etc/resolv.conf ]
then
sudo rm /etc/resolv.conf
sudo cp ./resolv-conf.host /etc/resolv.conf
export SYMRESOLV=true
fi
DOCKERBIN=$(which docker)
${DOCKERBIN} pull mazaclub/openvpn-client
${DOCKERBIN} run \
--privileged \
--rm --net host \
--name openvpn_client \
-v /etc/resolv.conf:/etc/resolv.conf \
-v $(pwd)/ovpn:/etc/openvpn \
mazaclub/openvpn-client
set -x
if [ "$SYMRESOLV" = "true" ]
then
sudo rm /etc/resolv.conf
sudo ln -s /run/resolvconf/resolv.conf /etc/resolv.conf
else
sudo cp -av ./resolv-conf.host /etc/resolv.conf
fi
|
guruvan/docker-openvpn-client
|
host-startup/run-openvpn-client-cli.sh
|
Shell
|
unlicense
| 1,000 |