code | repo_name | path | language | license | size
---|---|---|---|---|---
#!/bin/bash
set -x
set -e
export PYTHONUNBUFFERED="True"
export CUDA_VISIBLE_DEVICES=$1
export LD_PRELOAD=/usr/lib/libtcmalloc.so.4
LOG="experiments/logs/rgbd_scene_single_depth.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
exec &> >(tee -a "$LOG")
echo Logging output to "$LOG"
# train FCN for single frames
time ./tools/train_net.py --gpu 0 \
--network vgg16_convs \
--weights data/imagenet_models/vgg16_convs.npy \
--imdb rgbd_scene_train \
--cfg experiments/cfgs/rgbd_scene_single_depth.yml \
--iters 40000
if [ -f "$PWD/output/rgbd_scene/rgbd_scene_val/vgg16_fcn_depth_single_frame_rgbd_scene_iter_40000/segmentations.pkl" ]
then
  rm "$PWD/output/rgbd_scene/rgbd_scene_val/vgg16_fcn_depth_single_frame_rgbd_scene_iter_40000/segmentations.pkl"
fi
# test FCN for single frames
time ./tools/test_net.py --gpu 0 \
--network vgg16_convs \
--model output/rgbd_scene/rgbd_scene_train/vgg16_fcn_depth_single_frame_rgbd_scene_iter_40000.ckpt \
--imdb rgbd_scene_val \
--cfg experiments/cfgs/rgbd_scene_single_depth.yml \
--rig data/RGBDScene/camera.json
| yuxng/DA-RNN | experiments/scripts/rgbd_scene_single_depth.sh | Shell | mit | 1,060 |
#!/bin/sh
gem install xcpretty --no-rdoc --no-ri --no-document --quiet
| vbergae/VBTree | script/bootstrap.sh | Shell | mit | 73 |
#!/bin/bash
killall ffmpeg
#notify-send -t 6000 "Screencast paused"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
zenity --question --text "Would you like to resume?"
if [ $? = 0 ]; then
	# 'yes'
	bash "$DIR/screencast.sh"
else
	# 'no'
	bash "$DIR/screencast_stop.sh"
fi
#killall key-mon
| babbelnedd/screencast | screencast_pause.sh | Shell | mit | 290 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2843-1
#
# Security announcement date: 2014-01-13 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:49 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - graphviz:2.26.3-14+deb7u1
#
# Last versions recommended by security team:
# - graphviz:2.26.3-14+deb7u2
#
# CVE List:
# - CVE-2014-0978
# - CVE-2014-1236
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade graphviz=2.26.3-14+deb7u2 -y
| Cyberwatch/cbw-security-fixes | Debian_7_(Wheezy)/x86_64/2014/DSA-2843-1.sh | Shell | mit | 643 |
#!/bin/sh
set -e
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
realpath() {
DIRECTORY="$(cd "${1%/*}" && pwd)"
FILENAME="${1##*/}"
echo "$DIRECTORY/$FILENAME"
}
install_resource()
{
if [[ "$1" = /* ]] ; then
RESOURCE_PATH="$1"
else
RESOURCE_PATH="${PODS_ROOT}/$1"
fi
if [[ ! -e "$RESOURCE_PATH" ]] ; then
cat << EOM
error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script.
EOM
exit 1
fi
case $RESOURCE_PATH in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}"
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}"
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.framework)
echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH"`.mom\""
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\""
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\""
xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE=$(realpath "$RESOURCE_PATH")
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
*)
echo "$RESOURCE_PATH"
echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "$PODS_CONFIGURATION_BUILD_DIR/MWPhotoBrowser/MWPhotoBrowser.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "$PODS_CONFIGURATION_BUILD_DIR/MWPhotoBrowser/MWPhotoBrowser.bundle"
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "`realpath $PODS_ROOT`*" ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
| y500/MWPhotoBrowser | Example/Pods/Target Support Files/Pods-MWPhotoBrowser_Example/Pods-MWPhotoBrowser_Example-resources.sh | Shell | mit | 5,407 |
#!/bin/bash
echo "======== Pushing to remotes: start ========"
echo "======== Remote repo list ========"
git remote -v
###########################
servers=(gitlab github)
###########################
para=''
repo='master'
if [[ $1 = "--force" ]]; then
para=$1
elif [[ -n $1 ]]; then
repo=$1
fi
if [[ $2 = "--force" ]]; then
para=$2
elif [[ -n $2 ]]; then
repo=$2
fi
for (( i = 0; i < ${#servers[*]}; i++ )); do
echo "==== 同步 ${servers[i]} $repo ===="
git push $para ${servers[i]} $repo
done
| hikaliv/LiBoHikalivImageMinification | gitpush.sh | Shell | mit | 505 |

docker build -t oydeu/oyd-shiny_armhf .
| OwnYourData/app-template | docker/oyd-shiny_armhf/build.sh | Shell | mit | 40 |
#!/bin/bash
# /etc/init.d/VMManager
# Change 'vbox' to your VirtualBox user
VMUSER=vbox
# List your virtual machine names here, separated by spaces, e.g. 'VM1' 'VM2' 'VM3'
declare -a VM=('VirtualBox-Name');
case "$1" in
start)
echo "Starting VirtualBox VM..."
cd
echo "Available VM's: ${VM[@]}"
for i in "${VM[@]}"
do
sudo -H -b -u $VMUSER /usr/bin/VBoxManage startvm "$i" --type headless
done
sleep 3
;;
stop)
echo "Saving state of Virtualbox VM..."
for i in "${VM[@]}"
do
sudo -H -u $VMUSER /usr/bin/VBoxManage controlvm "$i" savestate
done
sleep 3
;;
*)
echo "Usage: /etc/init.d/VMManager {start|stop}"
exit 1
;;
esac
exit 0
| rafaelcpalmeida/phpvirtualbox-auto-starter | VMManager.sh | Shell | mit | 973 |
#!/bin/bash
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Flurbo protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"
#delete existing rules
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
# iptables -t mangle -D OUTPUT 1
# ret=$?
#done
#limit outgoing traffic to and from port 8913. but not when dealing with a host on the local network
# (defined by $LOCALNET)
# --set-mark marks packages matching these criteria with the number "2"
# these packages are filtered by the tc filter with "handle 2"
# this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 8913 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 8913 ! -d ${LOCALNET} -j MARK --set-mark 0x2
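# To verify the setup afterwards (illustrative commands, run manually):
#   tc -s class show dev ${IF}
#   iptables -t mangle -L OUTPUT -v -n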
| Flurbos/Flurbo | contrib/qos/tc.sh | Shell | mit | 1,669 |
#!/bin/bash
set -e
JDK_FEATURE=10
#TMP=$(curl -L jdk.java.net/${JDK_FEATURE})
#TMP="${TMP#*Most recent build: jdk-${JDK_FEATURE}+}" # remove everything before the number
#TMP="${TMP%%<*}" # remove everything after the number
#JDK_BUILD="$(echo -e "${TMP}" | tr -d '[:space:]')" # remove all whitespace
JDK_ARCHIVE=openjdk-10_linux-x64_bin.tar.gz
#jdk-${JDK_FEATURE}+${JDK_BUILD}_linux-x64_bin.tar.gz
cd ~
wget --no-check-certificate https://download.java.net/java/GA/jdk10/10/binaries/openjdk-10_linux-x64_bin.tar.gz
#https://download.java.net/java/jdk${JDK_FEATURE}/archive/${JDK_BUILD}/BCL/${JDK_ARCHIVE}
tar -xzf ${JDK_ARCHIVE}
export JAVA_HOME=~/jdk-${JDK_FEATURE}
export PATH=${JAVA_HOME}/bin:$PATH
cd -
echo check java version
java --version
wget https://www-eu.apache.org/dist/maven/maven-3/3.5.4/binaries/apache-maven-3.5.4-bin.zip
unzip -qq apache-maven-3.5.4-bin.zip
export M2_HOME=$PWD/apache-maven-3.5.4
export PATH=$M2_HOME/bin:$PATH
| arnaudroger/SimpleFlatMapper | .travis/install-jdk-10.sh | Shell | mit | 991 |
#!/usr/bin/env bash
# Copyright © 2011 - 2021 by Renaud Guillard ([email protected])
################################################################################
# Update ns/sh scripts and completion files
################################################################################
relativePathToRoot="../.."
scriptPath="$(dirname "${0}")"
projectPath="${scriptPath}/${relativePathToRoot}"
bashCompletionOutputPath="${projectPath}/resources/bash_completion.d"
bashCompletionStylesheetBasePath="${projectPath}/ns/xsl/program"
bashCompletionStylesheetFileName="bashcompletion.xsl"
cwd="$(pwd)"
cd "${projectPath}"
projectPath="$(pwd)"
cd "${cwd}"
# Rebuild build-shellscript.sh
${projectPath}/tools/sh/refresh-build-shellscript.sh
buildshellscript="${projectPath}/ns/sh/build-shellscript.sh"
[ -x "${buildshellscript}" ] || (echo "${buildshellscript} not executable" && exit 1)
singleBuild=""
for ((i=1;${i}<=${#};i++))
do
singleBuild="${!i}"
done
completionCommands=
appsBasePath="${projectPath}/ns/xsh/apps"
outBasePath="${projectPath}/ns/sh"
while read f
do
fn="${f%.xsh}"
b="$(basename "${f}")"
d="$(dirname "${f}")"
bn="${b%.xsh}"
[ "${d}" = '.AppleDouble' ] && continue
if [ ! -f "${fn}.xml" ]
then
continue
fi
subPath="${d#${appsBasePath}}"
shOut="${outBasePath}${subPath}/${bn}.sh"
mkdir -p "$(dirname "${shOut}")"
if [ "${bn}" != "build-shellscript" ]
then
if [ ! -z "${singleBuild}" ] && [ "${bn}" != "${singleBuild}" ]
then
continue
fi
echo "Update ${b}"
if ! ${buildshellscript} -p -x ${fn}.xml -s ${f} -o ${shOut}
then
echo "Failed to update ${f}" 1>&2
exit 1
fi
fi
if [ ! -z "${singleBuild}" ] && [ "${bn}" != "${singleBuild}" ]
then
continue
fi
schemaVersionArgs=(\
--xinclude \
--stringparam 'namespacePrefix' 'http://xsd.nore.fr/program' \
--stringparam 'defaultVersion' '2.0' \
"${projectPath}/ns/xsl/schema-version.xsl" \
)
programSchemaVersion="$(xsltproc "${schemaVersionArgs[@]}" "${fn}.xml")"
xsltproc \
--xinclude \
--stringparam prg.bash.completion.programFileExtension ".sh" \
--output "${bashCompletionOutputPath}/${bn}.sh" \
"${bashCompletionStylesheetBasePath}/${programSchemaVersion}/${bashCompletionStylesheetFileName}" \
"${fn}.xml"
done << EOF
$(find "${appsBasePath}" -mindepth 1 -maxdepth 2 -name "*.xsh")
EOF
echo "Update tools"
for f in "${projectPath}/resources/xsh/"*.xsh
do
o="$(basename "${f}")"
bn="${o%.xsh}"
o="${o%xsh}sh"
x="${f%xsh}xml"
if [ ! -z "${singleBuild}" ] && [ "${bn}" != "${singleBuild}" ]
then
continue
fi
echo "${o}"
if [ -f "${x}" ]
then
${buildshellscript} -p -x "${x}" -s "${f}" -o "${projectPath}/tools/sh/${o}"
else
xsltproc --xinclude -o "${projectPath}/tools/sh/${o}" "${projectPath}/ns/xsl/program/2.0/xsh.xsl" "${f}"
fi
done
| noresources/ns-xml | tools/sh/update-sh.sh | Shell | mit | 2,808 |
#!/usr/bin/env bash
ROOT_DIR=$PWD;
#rsync -r "$ROOT_DIR/libs/material/schematics" "$ROOT_DIR/dist/@uiux/material"
cd "$ROOT_DIR/dist/libs/cdk"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/d3"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/dal"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/device"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/firebase"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/fn"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/libs/icons"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/material"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/ngrx"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/rxjs"
echo $PWD
npm publish --access public
cd "$ROOT_DIR/dist/libs/services"
echo $PWD
npm publish --access public
cd $ROOT_DIR
echo $PWD
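# Note: the repeated blocks above could be collapsed into a loop. Illustrative
# sketch (assumes every package except icons is published from dist/libs):
# for lib in cdk d3 dal device firebase fn material ngrx rxjs services; do
#   (cd "$ROOT_DIR/dist/libs/$lib" && npm publish --access public)
# done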
| UIUXEngineering/ix-material | scripts/publish.sh | Shell | mit | 906 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2013:0983
#
# Security announcement date: 2013-06-25 20:14:52 UTC
# Script generation date: 2017-01-01 21:14:41 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - curl.i386:7.15.5-17.el5_9
# - curl-debuginfo.i386:7.15.5-17.el5_9
# - curl.x86_64:7.15.5-17.el5_9
# - curl-debuginfo.x86_64:7.15.5-17.el5_9
# - curl-devel.i386:7.15.5-17.el5_9
# - curl-devel.x86_64:7.15.5-17.el5_9
#
# Last versions recommended by security team:
# - curl.i386:7.15.5-17.el5_9
# - curl-debuginfo.i386:7.15.5-17.el5_9
# - curl.x86_64:7.15.5-17.el5_9
# - curl-debuginfo.x86_64:7.15.5-17.el5_9
# - curl-devel.i386:7.15.5-17.el5_9
# - curl-devel.x86_64:7.15.5-17.el5_9
#
# CVE List:
# - CVE-2013-2174
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install curl.i386-7.15.5 -y
sudo yum install curl-debuginfo.i386-7.15.5 -y
sudo yum install curl.x86_64-7.15.5 -y
sudo yum install curl-debuginfo.x86_64-7.15.5 -y
sudo yum install curl-devel.i386-7.15.5 -y
sudo yum install curl-devel.x86_64-7.15.5 -y
| Cyberwatch/cbw-security-fixes | Red_Hat_5/x86_64/2013/RHSA-2013:0983.sh | Shell | mit | 1,216 |
gradle clean build -x test
service tomcat stop
rm -rf /opt/tomcat/webapps/*
cp -v build/libs/MediNetServices.war /opt/tomcat/webapps/ROOT.war
service tomcat start
| rokon12/mNet2 | deploy.sh | Shell | mit | 162 |
#!/bin/bash
NUMBER=1
for file in `ls -S "$1"`
do
echo "$1/$file"
if [ -x "$1/$file" ]
then
echo "Renaming $file to $file.$NUMBER"
mv "$1/$file" "$1/$file.$NUMBER"
NUMBER=$((NUMBER + 1))
fi
done
| olafkrawczyk/pwr_so_lab | lab0/script_4.sh | Shell | mit | 200 |
# Run Redis detached, with a custom config mounted in from the host
docker run \
    --name redis-cache \
    -v /myredis/conf/redis.conf:/usr/local/etc/redis/redis.conf \
    -d \
    redis:latest \
    redis-server /usr/local/etc/redis/redis.conf
| programster/tutorial-redis-cache | Redis/start-redis-container.sh | Shell | mit | 194 |
#!/usr/bin/env bash
## =======================================================================================
# File: setup-git.sh
# Author: Cashiuus
# Created: 16-Dec-2021 Revised:
##-[ Copyright ]--------------------------------------------------------------------------
# MIT License ~ http://opensource.org/licenses/MIT
## =======================================================================================
__version__="0.0.1"
__author__="Cashiuus"
## ==========[ TEXT COLORS ]============= ##
RESET="\033[00m" # Normal
GREEN="\033[01;32m" # Success
YELLOW="\033[01;33m" # Warnings (some terminals its yellow)
RED="\033[01;31m" # Errors
BLUE="\033[01;34m" # Headings
PURPLE="\033[01;35m" # Other
GREY="\e[90m" # Subdued Text
BOLD="\033[01;01m" # Normal fg color, but bold
ORANGE="\033[38;5;208m" # Debugging
BGRED="\033[41m" # BG Red
BGPURPLE="\033[45m" # BG Purple
BGYELLOW="\033[43m" # BG Yellow
BGBLUE="\033[104m" # White font with blue background (could also use 44)
## =============[ CONSTANTS ]============= ##
GIT_MAIN_DIR="${HOME}/git"
GIT_DEV_DIR="${HOME}/git-dev"
function check_root() {
if [[ $EUID -ne 0 ]]; then
# If not root, check if sudo package is installed
if [[ $(which sudo) ]]; then
# This accounts for both root and sudo. If normal user, it'll use sudo.
# If you run script as root, $SUDO is blank and script will soldier on.
export SUDO="sudo"
echo -e "${YELLOW}[WARN] This script leverages sudo for installation. Enter your password when prompted!${RESET}"
sleep 1
# Test to ensure this user is able to use sudo
sudo -l >/dev/null
if [[ $? -eq 1 ]]; then
# sudo pkg is installed but current user is not in sudoers group to use it
echo -e "${RED}[ERROR]${RESET} You are not able to use sudo. Running install to fix."
read -r -t 5
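            # NOTE: install_sudo is assumed to be defined in a sourced helper; it is not part of this script.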
install_sudo
fi
else
echo -e "${RED}[ERROR]${RESET} Can't use sudo, fix your system and try again!"
exit 1
fi
fi
}
check_root
## Check Internet Connection
echo -e "${GREEN}[*]${RESET} Checking Internet access"
for i in {1..4}; do ping -c 1 -W ${i} google.com &>/dev/null && break; done
if [[ "$?" -ne 0 ]]; then
    for i in {1..4}; do ping -c 1 -W ${i} 8.8.8.8 &>/dev/null && break; done
if [[ "$?" -eq 0 ]]; then
echo -e "${RED}[ERROR]${RESET} Internet partially working, DNS is failing, check resolv.conf"
exit 1
else
echo -e "${RED}[ERROR]${RESET} Internet is completely down, check IP config or router"
exit 1
fi
fi
## ========================================================================== ##
# ================================[ BEGIN ]================================ #
$SUDO apt-get -qq update
$SUDO apt-get -y install git
[[ ! -d "${GIT_MAIN_DIR}" ]] && mkdir -p "${GIT_MAIN_DIR}" 2>/dev/null
[[ ! -d "${GIT_DEV_DIR}" ]] && mkdir -p "${GIT_DEV_DIR}" 2>/dev/null
# ==========[ Configure GIT ]=========== #
echo -e "${GREEN}[+]${RESET} Now setting up Git, you will be prompted to enter your name for commit author info..."
# -== Git global config settings ==- #
echo -e -n " Git global config :: Enter your name: "
read GIT_NAME
git config --global user.name "$GIT_NAME"
echo -e -n " Git global config :: Enter your email: "
read GIT_EMAIL
git config --global user.email "$GIT_EMAIL"
git config --global color.ui auto
echo -e "${GREEN}[*]${RESET} As of Oct 1, 2020, Git has changed default branch to 'main'"
echo -e "${GREEN}[*]${RESET} Therefore, setting your git config default branch to 'main' now"
git config --global init.defaultBranch main
# Set the previously-default setting to suppress warnings and make this the new default
git config --global pull.rebase false
# Git Aliases Ref: https://git-scm.com/book/en/v2/Git-Basics-Git-Aliases
# Other settings/standard alias helpers
git config --global alias.co checkout
git config --global alias.br branch
git config --global alias.ci commit
git config --global alias.st status
# Git short status
git config --global alias.s 'status -s'
# Create custom unstage alias - Type: git unstage fileA (same as: git reset HEAD -- fileA)
git config --global alias.unstage 'reset HEAD --'
# Show the last commit (Type: git last)
git config --global alias.last 'log -1 HEAD'
# My Custom Git Aliases
# TODO: Test if this works correctly, it should simply add --recursive to every clone
# The reason for --recursive is for git projects with submodules, which don't clone by default
#git config --global alias.clone 'clone --recursive'
# Other alias ideas:
# https://majewsky.wordpress.com/2010/11/29/tip-of-the-day-dont-remember-git-clone-urls/
# ========[ SSH Key Integrations ] ======== #
# If you need to create a new SSH key, you can do it via:
#ssh-keygen -t rsa -C "<your_email>" -f ~/.ssh/<key_filename>
# To change your pw/passphrase on an existing ssh key, do that via:
#ssh-keygen -p -f ~/.ssh/<your_key>
echo -e "${GREEN}[*]${RESET} Listing your .ssh directory contents for the next input request"
ls -al "${HOME}/.ssh/"
echo -e "\n"
#echo -e -n "${YELLOW}[INPUT]${RESET} Git :: Enter your current Github SSH Key full file path: "
#read GIT_SSH_KEY
finished=""
while [[ -z "$finished" ]]; do
read -r -e -p " Please enter your current Github SSH key absolute file path: " GIT_SSH_KEY
if test -e "${GIT_SSH_KEY}"; then
finished=true
break
else
echo -e "${YELLOW}[-]${RESET} Provided file path is invalid, try again!\n"
fi
done
echo -e "${ORANGE}[DEBUG] GIT_SSH_KEY is: $GIT_SSH_KEY ${RESET}"
if [[ -e "${GIT_SSH_KEY}" ]]; then
# Ensure it has the right permissions
chmod 0400 "${GIT_SSH_KEY}"
    eval "$(ssh-agent -s)"
ssh-add "${GIT_SSH_KEY}" || echo -e "${RED}[ERROR]${RESET} Failed to add SSH key to ssh-agent, add it manually later."
file="${HOME}/.ssh/config"
if [[ ! -e "${file}" ]]; then
cat <<EOF > "${file}"
Host github
Hostname github.com
User git
PreferredAuthentications publickey
IdentityFile ${GIT_SSH_KEY}
Host gitlab
Hostname gitlab.com
User git
PreferredAuthentications publickey
IdentityFile ${GIT_SSH_KEY}
# Place additional host aliases here, such as for work if it uses a different key
# Usage: git clone git@github_work:user/repo.git
#Host github_work
# User git
# PreferredAuthentications publickey
# IdentityFile ~/.ssh/work_key
EOF
# Test the connection
echo -e "${GREEN}[*]${RESET} Testing your git connection..."
ssh -T git@github
else
echo -e "${YELLOW}[WARN]${RESET} File ~/.ssh/config already exists! Fix it manually!"
fi
else
echo -e "${YELLOW}[WARN]${RESET} SSH Key provided is not a valid file, so fix and try again!"
exit 1
fi
# ==========[ Add GIT Files ]=========== #
file="${HOME}/.gitexcludes"
if [[ ! -f "${file}" ]]; then
cat <<EOF > "${file}"
# Octocat Recommendations: https://gist.github.com/octocat/9257657
# Git gitignore Manual: http://git-scm.com/docs/gitignore
# gitignore Example Files: https://github.com/github/gitignore
# This can also be a .gitignore_global file I believe
# === OS X ===
.DS_Store
.Spotlight-V100
.Trashes
# === DB Files ===
*.sqlite3
# === VI Swap Files ===
.swp
# === Misc ===
*.cap
*.local
*.log
*.ovpn
*.pyc
EOF
fi
# ====[ Configure SSH-agent to run automatically ]=== #
# Reference: https://docs.github.com/en/authentication/connecting-to-github-with-ssh/working-with-ssh-key-passphrases
file="${HOME}/.sshagent"
cat <<EOF > "${file}"
#
#
# Note: ~/.ssh/environment should not be used, as it
# already has a different purpose in SSH.
env="${HOME}/.ssh/agent.env"
# Note: Don't bother checking SSH_AGENT_PID. It's not used
# by SSH itself, and it might even be incorrect
# (for example, when using agent-forwarding over SSH).
agent_is_running() {
if [ "$SSH_AUTH_SOCK" ]; then
# ssh-add returns:
# 0 = agent running, has keys
# 1 = agent running, no keys
# 2 = agent not running
ssh-add -l >/dev/null 2>&1 || [ $? -eq 1 ]
else
false
fi
}
agent_has_keys() {
ssh-add -l >/dev/null 2>&1
}
agent_load_env() {
. "${env}" >/dev/null
}
agent_start() {
(umask 077; ssh-agent >"$env")
. "$env" >/dev/null
}
if ! agent_is_running; then
agent_load_env
fi
# if your keys are not stored in ~/.ssh/id_rsa.pub or ~/.ssh/id_dsa.pub, you'll need
# to paste the proper path after ssh-add
if ! agent_is_running; then
agent_start
ssh-add
elif ! agent_has_keys; then
ssh-add
fi
unset env
EOF
# Determine user's active shell to update the correct resource file
if [[ "${SHELL}" == "/usr/bin/zsh" ]]; then
SHELL_FILE=~/.zshrc
elif [[ "${SHELL}" == "/bin/bash" ]]; then
SHELL_FILE=~/.bashrc
else
# Just in case I add other shells in the future
SHELL_FILE=~/.bashrc
fi
# Add source for .sshagent file so it loads for each new session
grep -q 'source "${HOME}/.sshagent"' "${SHELL_FILE}" \
    || echo '[[ -s "${HOME}/.sshagent" ]] && source "${HOME}/.sshagent"' >> "${SHELL_FILE}"
# -- Finished - Script End -----------------------------------------------------
function ctrl_c() {
# Capture pressing CTRL+C during script execution to exit gracefully
# Usage: trap ctrl_c INT
echo -e "${GREEN}[*] ${RESET}CTRL+C was pressed -- Shutting down..."
trap finish EXIT
}
# -- Git Setup Notes -----------------------------------------------------------
#
# How to change existing repo's from http to ssh
# cd ~/git/dir
# git remote -v
# git remote set-url origin [email protected]:<forked_user>/<forked_repo>
# git remote set-url upstream [email protected]:<orig_user>/<orig_repo>
# git remote -v
# git pull
#
#
| Cashiuus/penprep | system-setup/linux/setup-git.sh | Shell | mit | 9,840 |
#!/bin/sh
# file: implement.sh
#
# (c) Copyright 2008 - 2011 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#
#-----------------------------------------------------------------------------
# Script to synthesize and implement the RTL provided for the clocking wizard
#-----------------------------------------------------------------------------
# Clean up the results directory
rm -rf results
mkdir results
# Copy unisim_comp.v file to results directory
cp $XILINX/verilog/src/iSE/unisim_comp.v ./results/
# Synthesize the Verilog Wrapper Files
echo 'Synthesizing Clocking Wizard design with XST'
xst -ifn xst.scr
mv Clock70HMz_exdes.ngc results/
# Copy the constraints files generated by Coregen
echo 'Copying files from constraints directory to results directory'
cp ../example_design/Clock70HMz_exdes.ucf results/
cd results
echo 'Running ngdbuild'
ngdbuild -uc Clock70HMz_exdes.ucf Clock70HMz_exdes
echo 'Running map'
map -timing Clock70HMz_exdes -o mapped.ncd
echo 'Running par'
par -w mapped.ncd routed mapped.pcf
echo 'Running trce'
trce -e 10 routed -o routed mapped.pcf
echo 'Running design through bitgen'
bitgen -w routed
echo 'Running netgen to create gate level model for the clocking wizard example design'
netgen -ofmt verilog -sim -sdf_anno false -tm Clock70HMz_exdes -w routed.ncd routed.v
cd ..
| aj-michael/Digital-Systems | Lab4-Part2-RAMwithHyperTerminalDisplay/ipcore_dir/Clock70HMz/implement/implement.sh | Shell | mit | 3,428 |
#!/bin/bash
wget https://s3-us-west-2.amazonaws.com/cities-gem/cities.tar.gz
tar -xzf cities.tar.gz
rm cities.tar.gz
| RantGames/skirmish | pull_city_data.sh | Shell | mit | 116 |
#!/bin/bash
trap 'kill %1' SIGINT
cd server/
npm start &
cd ../client/
npm start
| GabrielQDuarte/Series-MEAN2 | init.sh | Shell | mit | 84 |
#!/bin/sh
myrun() {
echo '```console'
echo "$@"
shift 1
"$@"
echo '```'
echo
}
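# myrun prints the given command line inside a fenced console block, then
# executes it; the first argument (e.g. "\#") is shown as a prompt marker only.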
cat <<EOF
# TryTLS testing with Ubuntu
We chose Ubuntu 12.04, 14.04 and 16.04 LTS releases for this TryTLS-shootout
based on the [Ubuntu release end of life](http://www.ubuntu.com/info/release-end-of-life).
\`\`\`console
docker run -ti --rm ubuntu-14.04
\`\`\`
EOF
myrun \# grep DISTRIB_DESCRIPTION /etc/lsb-release
cat <<EOF
<!-- markdownlint-disable MD013 -->
python2-requests | python2-urllib2 | python3-urllib | go-nethttp | java-https | java-net | php-file-get-contents
---------------- | --------------- | -------------- | ---------- | ---------- | -------- | ---------------------
? | ? | ? | ? | ? | ? | ?
## python2-requests
EOF
myrun \# python --version
myrun \# trytls https python python2-requests/run.py
cat <<EOF
## python2-urllib2
EOF
myrun \# python --version
myrun \# trytls https python python2-urllib2/run.py
cat <<EOF
## python3-urllib
EOF
myrun \# python3 --version
myrun \# trytls https python3 python3-urllib/run.py
cat <<EOF
## go-nethttp
EOF
myrun \# go version
myrun \# trytls https go-nethttp/run
cat <<EOF
## java-https
EOF
myrun \# java -version
myrun \# trytls https java -classpath java-https Run
cat <<EOF
## java-net
EOF
myrun \# java -version
myrun \# trytls https java -classpath java-net Run
cat <<EOF
## php-file-get-contents
EOF
myrun \# php --version | sed -e 's/ *$//'
myrun \# trytls https php php-file-get-contents/run.php
cat <<EOF
<!-- markdownlint-enable MD013 -->
EOF
| ouspg/trytls | shootout/ubuntu-14.04/shootout.sh | Shell | mit | 1,602 |
#!/bin/sh
# For installation and usage, please refer to my blog post:
# http://derekstavis.github.io/posts/creating-a-installer-using-inno-setup-on-linux-and-mac-os-x/
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Derek Willian Stavis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
SCRIPT=$1
INNO_BIN="Inno Setup 5/ISCC.exe"
# Check if variable is set
[ -z "$SCRIPT" ] && { echo "Usage: $0 <SCRIPT_NAME>"; echo; exit 1; }
# Check if filename exist
[ ! -f "$SCRIPT" ] && { echo "File not found. Aborting."; echo; exit 1; }
# Check if Wine is present
command -v wine >/dev/null 2>&1 || { echo >&2 "Wine doesn't seem to be installed. Aborting."; echo; exit 1; }
# Get Program Files path via Wine command prompt
PROGRAMFILES=$(wine cmd /c 'echo %PROGRAMFILES%' 2>/dev/null)
# Translate Windows path to absolute Unix path
PROGFILES_PATH=$(winepath -u "${PROGRAMFILES}" 2>/dev/null)
# Get inno setup path
INNO_PATH="${PROGFILES_PATH%?}/${INNO_BIN}"
# Translate Unix script path to Windows path
SCRIPT=$(winepath -w "$SCRIPT" 2> /dev/null)
# Check if Inno Setup is installed into Wine
[ ! -f "$INNO_PATH" ] && { echo "Install Inno Setup 5 Quickstart before running this script."; echo; exit 1; }
# Compile!
wine "$INNO_PATH" "$SCRIPT"
| idleberg/InnoSetup-Sublime-Text | scripts/build.sh | Shell | mit | 2,252 |
#!/bin/sh
# From https://gist.githubusercontent.com/jace/88b81c51cd8044409ddbce97d582eaac/raw/eb49020ca2e896aea58549a5581f159340caafcb/denoise.sh :
# 1. extract audio from all videos (assuming .mp4 videos).
for FILE in *.mp4; do ffmpeg -i "$FILE" "${FILE%%.mp4}.wav"; done
# for FILE in *.m4a; do ffmpeg -i $FILE ${FILE%%.mp4}.wav; done
# 2. use the first second of the first audio file as the noise sample.
# sox `ls *.wav | head -1` -n trim 0 1 noiseprof noise.prof
for FILE in *.wav; do sox "$FILE" -n trim 0 1 noiseprof "${FILE%%.wav}.noise.prof"; done
# Replace with a specific noise sample file if the first second doesn't work for you:
# sox noise.wav -n noiseprof noise.prof
# 3. clean the audio with noise reduction and normalise filters.
# for FILE in *.wav; do sox -S --multi-threaded --buffer 131072 $FILE ${FILE%%.wav}.norm.wav noisered noise.prof 0.21 norm; done
for FILE in *.wav; do sox -S --multi-threaded --buffer 131072 "$FILE" "${FILE%%.wav}.norm.wav" noisered "${FILE%%.wav}.noise.prof" 0.21 norm; done
# 4. re-insert audio into the videos.
# If you need an audio offset (+/- n seconds), add "-itsoffset n" immediately before the "-i" of the input you want to shift.
# for FILE in *.norm.wav; do ffmpeg -i ${FILE%%.norm.wav}.mp4 -i $FILE -c:v copy -c:a aac -strict experimental -map 0:v:0 -map 1:a:0 ${FILE%%.norm.wav}.sync.mp4; done
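# For example, to delay the audio track by 0.5 seconds (illustrative file names):
# ffmpeg -i clip.mp4 -itsoffset 0.5 -i clip.norm.wav -c:v copy -c:a aac -strict experimental -map 0:v:0 -map 1:a:0 clip.sync.mp4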
# 5. That's it. You're done!
###
# See also http://www.zoharbabin.com/how-to-do-noise-reduction-using-ffmpeg-and-sox/ :
# 1. Split the audio and video streams into 2 separate files:
# The VIDEO stream: ffmpeg -i input.mp4 -sameq -an tmpvid.mp4
# The AUDIO stream: ffmpeg -i input.mp4 -sameq tmpaud.wav
# 2. Generate a sample of noise from the audio of the file:
# ffmpeg -i input.mp4 -vn -ss 00:00:00 -t 00:00:01 noiseaud.wav
# -ss: the time offset from beginning. (h:m:s.ms).
# -t duration: record or transcode duration seconds of audio/video.
# Choose a segment of the audio where there’s no speech, only noise (e.g. speaker was silent for a sec).
# 3. Generate a noise profile in sox:
# sox noiseaud.wav -n noiseprof noise.prof
# 4. Clean the noise samples from the audio stream:
# sox tmpaud.wav tmpaud-clean.wav noisered noise.prof 0.21
# Change 0.21 to adjust the level of sensitivity in the sampling rates (I found 0.2-0.3 often provides best result).
# 5. Merge the audio and video streams back together:
# ffmpeg -i tmpaud-clean.wav -i tmpvid.mp4 -sameq vid.mp4
| tom-weatherhead/bash-scripts | denoise_ffmpeg_sox.sh | Shell | mit | 2,441 |
#!/bin/sh
# Depends on ApacheBench https://httpd.apache.org/docs/2.2/programs/ab.html
#
# On Debian
# ----------
# sudo apt-get install apache2-utils
#
# Included on Mac OS (nothing to do)
#
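# Flags: -n = total number of requests, -c = concurrent clients, -k = use HTTP KeepAlive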
ab -n 100000 -c 10 -k "http://localhost:8090/search?q=Kosmos"
| jaimeagudo/wikimediasearch-scala | benchmark/test_ab.sh | Shell | mit | 255 |
#!/bin/bash
echo "================================================================================"
echo " Installing Nginx "
echo "================================================================================"
apt-get install -y nginx
echo "================================================================================"
echo " Building Test Page "
echo "================================================================================"
mkdir -p /var/www/html/$APPNAME/current/public
cat <<EOF > /var/www/html/$APPNAME/current/public/index.php
<?php echo strtoupper('$APPNAME'); ?>
EOF
echo "================================================================================"
echo " Configuring Nginx "
echo "================================================================================"
cat <<EOF > /etc/nginx/sites-available/default
server {
listen 80 default_server;
listen [::]:80 default_server ipv6only=on;
root /var/www/html/$APPNAME/current/public;
index index.php index.html index.htm;
server_name localhost;
location / {
try_files \$uri \$uri/ =404;
}
error_page 404 /404.html;
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /var/www/html/$APPNAME/current/public;
}
location ~ \\.php\$ {
try_files \$uri =404;
fastcgi_split_path_info ^(.+\\.php)(/.+)\$;
fastcgi_pass unix:/var/run/php5-fpm.sock;
fastcgi_index index.php;
include fastcgi_params;
}
}
EOF
echo "================================================================================"
echo " Restarting Nginx "
echo "================================================================================"
service nginx restart
| CompanyMate/deploy | scripts/nginx.sh | Shell | mit | 1,682 |
#!/bin/sh
echo "Stopping galaxy daemon..."
cd ${GALAXY_HOME}
GALAXY_RUN_ALL=1 sh run.sh --stop-daemon
| crukci-bioinformatics/galaxy | admin/build/scripts/stop.sh | Shell | mit | 103 |
#!/usr/bin/env bash
set -euo pipefail
function get_form_token {
site=$1
problem=$2
curl -s "https://www.udebug.com/$site/$problem" | \
grep -o "form-[a-zA-Z0-9_-]\{43\}" | \
sed '1d'
}
function get_output {
site="$1"
problem="$2"
input="$3"
form_token="$4"
response_html=$(mktemp)
curl -s "https://www.udebug.com/$site/$problem" \
-H 'User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0' \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' \
-H 'Accept-Language: en-US,en;q=0.5' \
--compressed \
-H 'Content-Type: application/x-www-form-urlencoded' \
-H 'Origin: https://www.udebug.com' \
-H 'DNT: 1' \
-H 'Connection: keep-alive' \
-H "Referer: https://www.udebug.com/$site/$problem" \
-H 'Cookie: has_js=1' \
-H 'Upgrade-Insecure-Requests: 1' \
--data 'problem_nid=818685' \
--data-urlencode "input_data=$input" \
--data 'node_nid=' \
--data 'op=Get+Accepted+Output' \
--data 'output_data=' \
--data 'user_output=' \
--data "form_build_id=$form_token" \
--data 'form_id=udebug_custom_problem_view_input_output_form' > "$response_html"
hxextract textarea "$response_html" 2> /dev/null |\
hxselect 'textarea#edit-output-data' |\
sed 's/<[^>]*>//g'
rm "$response_html"
}
function check_file {
file="$1"
if [ ! -f "$file" ]; then
echo "file $1 does no exist"
exit 1
fi
}
function help () {
echo "usage: $(basename "$0") <options>"
echo
echo " -s | --site <programming contest site>"
echo " -p | --problem <problem code>"
echo " -i | --input-file <input file>"
echo " -m | --multiple-input-files"
echo " -l | --split-lines"
echo " -h | --help"
echo
}
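# Example usage (illustrative problem code and input file):
#   ./udebug.sh -s URI -p 1001 -i in.txt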
split_lines=0
single_input_file=0
multiple_input_files=0
while [ $# -gt 0 ]; do
case $1 in
-h|--help)
help
exit 0
;;
-p|--problem)
shift
problem_id="$1"
shift
;;
-s|--site)
shift
site="${1:-URI}"
shift
;;
-i|--input-file)
shift
single_input_file=1
input_file="$1"
shift
;;
-m|--multiple-input-files)
multiple_input_files=1
shift
;;
-l|--split-lines)
shift
split_lines=1
split_lines_count="$1"
shift
;;
*)
echo "$1 is an invalid option."
exit 1
;;
esac
done
if [ "$single_input_file" -eq 1 ] && [ "$multiple_input_files" -eq 1 ]; then
echo "You can not use -i and -m at the same time! To have more info, run:"
echo ""
echo "$(basename "$0") --help"
exit 1
fi
if [ "$single_input_file" -eq 1 ]; then
check_file "$input_file"
if [ "$split_lines" -eq 0 ]; then
form_token="$(get_form_token "$site" "$problem_id")"
get_output "$site" "$problem_id" "$(cat "$input_file")" "$form_token"
else
while read -r line; do
if [ "$split_lines_count" != 1 ]; then
for _ in $(seq 1 $((split_lines_count - 1))); do
read -r next_line
line="$(echo -e "${line}\n${next_line}")"
done
fi
form_token="$(get_form_token "$site" "$problem_id")"
get_output "$site" "$problem_id" "$line" "$form_token"
done < "$input_file"
fi
fi
if [ "$multiple_input_files" -eq 1 ]; then
for input_file in in-??.txt; do
output_suffix="$(echo "$input_file" | grep -o '[0-9]\{2\}')"
form_token="$(get_form_token "$site" "$problem_id")"
get_output "$site" "$problem_id" "$(cat "$input_file")" "$form_token" > "out-${output_suffix}.txt"
done
fi
| deniscostadsc/playground | scripts/udebug.sh | Shell | mit | 4,008 |
#! /bin/sh
rm -rf ./dist
npm publish
python setup.py sdist
twine upload dist/*
| sjml/hulkify | publish.sh | Shell | mit | 83 |
# generated from catkin/cmake/env-hooks/05.catkin-test-results.sh.develspace.in
export CATKIN_TEST_RESULTS_DIR="/home/viki/my_ws/src/gpsd_viewer/build/test_results"
export ROS_TEST_RESULTS_DIR="$CATKIN_TEST_RESULTS_DIR"
| ENSTA-Bretagne-Guerledan-BoiteNoire/ROS-ground-station | build/devel/etc/catkin/profile.d/05.catkin-test-results.sh | Shell | mit | 221 |
#!/bin/bash
# Void Linux inofficial package overlay script
# Info: https://github.com/robotanarchy/void-packages-bleeding-edge
#
# FUNCTIONS
#
function STEP
{
echo "$(tput bold)::: $1$(tput sgr0)"
}
# $1: package name
# $2: optional parameters
function update_repo()
{
STEP "Updating $1 sources..."
while IFS= read -r s; do
local type="$(echo $s | cut -d ' ' -f 2)"
local repo="$(echo $s | cut -d ' ' -f 3)"
local subf="$(echo $s | cut -d ' ' -f 4)"
local path="cache/$1/$subf"
local curr="$PWD"
echo "Updating $path..."
mkdir -p "$path"
cd "$path"
case "$type" in
"git" )
require_progs git perl
if [ -e ".git" ]; then
git stash
git pull
else
git clone $2 "$repo" .
git submodule init
fi
git submodule update
;;
* )
echo "$1: sources type $type not implemented!"
exit 1
;;
esac
cd "$curr"
done < <(grep "^$1 " sources)
}
function merge_templates()
{
STEP "Merging srcpkgs and shlibs into void-packages..."
local srcpkgs="cache/void-packages/srcpkgs"
for pkg in srcpkgs/*; do
echo "merging $pkg...."
rm -rf "$srcpkgs/$pkg"
cp -r "$pkg" "$srcpkgs/"
done
echo "merging shlibs..."
local shlibs="cache/void-packages/common/shlibs"
mv "${shlibs}" "${shlibs}_"
cat "shlibs" "${shlibs}_" > "${shlibs}"
rm "${shlibs}_"
}
# $1: package name
function update_template()
{
local s="$(grep "^$1 " sources | head --lines 1)"
[[ "$s" == "" ]] && return 0
local tmpl="cache/void-packages/srcpkgs/$1/template"
if ! grep -q "version=LATEST" "$tmpl"; then
echo "This package is in the sources file, but does not have"
echo "a 'version=LATEST' line in the template. Please fix this!"
echo "template path: srcpkgs/$1/template"
exit 1
fi
local type="$(echo $s | cut -d ' ' -f 2)"
local repo="$(echo $s | cut -d ' ' -f 3)"
local subf="$(echo $s | cut -d ' ' -f 4)"
local path="cache/$1/$subf"
local curr="$PWD"
cd "$path"
case "$type" in
"git" )
local v="$(git rev-list --count HEAD).$(git rev-parse --short HEAD)"
;;
esac
cd "$curr"
echo "$1 version: $v"
sed --in-place -s "s/version=LATEST/version=$v/" "$tmpl"
}
# $1: package name
function copy_sources()
{
[ ! -d "cache/$1" ] && return 0
echo "copying sources/$1 to masterdir/sources/..."
local sources="cache/void-packages/masterdir/sources"
mkdir -p "$sources"
[ -e "$sources/$1" ] && rm -rf "$sources/$1"
cp -rf "cache/$1" "$sources"
}
# $1: dependency, like name, name>1, name>=1, name<1
function pkg_name()
{
[[ "$1" == *">"* ]] && echo "$(echo $1 | cut -d '>' -f 1)" && return
[[ "$1" == *"<"* ]] && echo "$(echo $1 | cut -d '<' -f 1)" && return
echo "$1"
}
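# e.g. pkg_name "foo>=1.2" prints "foo"; a bare "foo" is printed unchanged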
# $1: program name (git, perl, ...)
function require_progs()
{
for prog in "$@"; do
    type "$prog" >/dev/null 2>&1 && continue
echo "ERROR: $prog not found, please install it and try again!"
exit 1
done
}
#
# MAIN CODE
#
if [ "$#" = 0 ]; then
echo "Syntax:"
echo " $(basename $0) overlay-package1 [overlay-package2 [...]]"
echo "For example:"
echo " $(basename $0) sway-git"
exit 1
fi
# Check template files
for pkg in "$@"; do
[ -e "srcpkgs/$pkg/template" ] && continue
echo "ERROR: File not found: srcpkgs/$pkg/template"
exit 1
done
update_repo void-packages --depth=1
merge_templates
# Parse dependencies
OVERLAY_ALL_PKGS=""
for pkg in "$@"
do
STEP "Parsing dependencies of $pkg..."
if [[ "$OVERLAY_ALL_PKGS" != *"$pkg"* ]]; then
OVERLAY_ALL_PKGS="$OVERLAY_ALL_PKGS $pkg"
fi
for dep in $(source "srcpkgs/$pkg/template"; \
echo $depends $makedepends $hostmakedepends) ;
do
name="$(pkg_name $dep)"
if [ -e "srcpkgs/$name" ]; then
echo "found overlay dependency $name"
if [[ "$OVERLAY_ALL_PKGS" != *"$name"* ]]; then
OVERLAY_ALL_PKGS="$OVERLAY_ALL_PKGS $name"
fi
elif [ ! -e "cache/void-packages/srcpkgs/$name" ]; then
echo "ERROR: Dependency $dep (name parsed as $name) not" \
" found in overlay or void packages!"
exit 1
fi
done
done
for pkg in $OVERLAY_ALL_PKGS; do
update_repo "$pkg"
update_template "$pkg"
copy_sources "$pkg"
done
# create a convinience symlink to binpkgs
if [ ! -e "binpkgs" ]; then
echo "Creating binpkgs symlink..."
mkdir -p cache/void-packages/hostdir/binpkgs
ln -sf cache/void-packages/hostdir/binpkgs .
fi
STEP "Done!"
echo "You can build and install your packages now:"
echo " ./xbps-src binary-bootstrap # (only required the first time)"
echo " ./xbps-src pkg $@"
echo " sudo xbps-install --repository=binpkgs $@"
| robotanarchy/void-packages-bleeding-edge | overlay.sh | Shell | cc0-1.0 | 4,491 |
#!/bin/sh
######################################################################
#
# @file extrepofcns.sh
#
# @brief Documentation goes here.
#
# @version $Rev$ $Date$
#
# Copyright © 2012-2017, Tech-X Corporation, Boulder, CO.
# See LICENSE file (EclipseLicense.txt) for conditions of use.
#
######################################################################
if test -e $SCRIPT_DIR/bilder/runnr/runnrfcns.sh; then
source $SCRIPT_DIR/bilder/runnr/runnrfcns.sh
fi
host=$UQMAILHOST
#---------------------------------------------------------------
#
# checkout or update a repository
#
# Args:
# 1: The local directory where the repo will (or already does) reside
# 2: The repository URL
# 3: The repo branch to check out (git only, optional)
#
# Named args (must come first):
# -g: We are working with (or creating) a git repo (svn is the default)
#
# Returns 0 on success non-zero on failure
#
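# Example (illustrative repo URL and branch):
#   getRepo -g mylib https://example.org/git/mylib.git develop
#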
getRepo() {
# Get options
local repotype="SVN"
while test -n "$1"; do
case "$1" in
-g)
repotype="GIT"
;;
*)
break
;;
esac
shift
done
local localDir=$1
local repoUrl=$2
local gitBranch=$3
if test -d $localDir; then
cd $localDir
if test $repotype == "SVN"; then
echo "Running svn up on $PWD"
svn up
res=$?
elif test $repotype == "GIT"; then
echo "Running git pull on $PWD"
git pull
res=$?
else
echo "Unknown repository type, $repotype, do not know how to update"
res=1
fi
cd ..
else
if test $repotype == "SVN"; then
svn co $repoUrl $localDir
res=$?
elif test $repotype == "GIT"; then
if test -z "$gitBranch"; then
git clone $repoUrl $localDir
res=$?
else
git clone --no-checkout $repoUrl $localDir
res=$?
if test $res = 0; then
cd $localDir
res=$?
fi
if test $res = 0; then
git checkout -b "${gitBranch}" "origin/${gitBranch}"
res=$?
cd ..
fi
fi
else
echo "Unknown repository type, $repotype, do not know how to create"
res=1
fi
fi
return $res
}
#
# checkout or update a list of repositories
# Requires that repo_root be set to the head of all repositories in the list
# Assumes that each repo has a trunk to check out.
#
# Args:
# 1: A space-separated list of repository names
#
getRepos() {
local repoList=$1
for repo_name in $repoList; do
repo=${repo_root}"/"${repo_name}"/trunk"
getRepo $repo_name $repo
done
}
getResults() {
testdir=$1
cd $1
resrootdir=`svn info | grep URL: | cut -f2- -d: | sed s@/code@/results@g | sed s/tests/results/g`
resname=`echo $testdir | sed s/tests/results/g `
hostversions=`svn ls $resrootdir | grep $host `
  if test -z "$hostversions"; then
echo "No accepted results found for $testdir on machine $host"
echo Searched: $resrootdir
else
echo "Available versions: $hostversions"
fi
for hv in $hostversions; do
svn co $resrootdir/$hv $resname-$hv
done
cd ..
}
| Tech-XCorp/bilder | extrepofcns.sh | Shell | epl-1.0 | 3,083 |
#!/bin/sh
### Following Assumptions are made in this script
grep "xDSL_MODE_VRX" /flash/rc.conf
if [ $? != "0" ] ; then
echo "#<< xDSL_MODE_VRX" >> /flash/rc.conf
echo "xDSL_MODE_VRX=vdsl" >> /flash/rc.conf
echo "#>> xDSL_MODE_VRX" >> /flash/rc.conf
echo "#<< ACCELERATION" >> /flash/rc.conf
echo "ACCELERATION=acc_no" >> /flash/rc.conf
echo "#>> ACCELERATION" >> /flash/rc.conf
echo " "
echo "No mode was set , set VDSL mode as default"
/usr/sbin/savecfg.sh
fi
. /flash/rc.conf
echo 1 > /proc/sys/net/netfilter/nf_conntrack_tcp_be_liberal
echo 3000 >/proc/sys/vm/min_free_kbytes # -- OOM issue
echo 10 >/proc/sys/net/core/netdev_budget # -- bad voice quality
case "$xDSL_MODE_VRX" in
vdsl)
echo "Setting in flash is VDSL mode"
case "$ACCELERATION" in
acc_no)
insmod /lib/modules/2.6.20.19/ifxmips_ptm.ko
sleep 1
brctl setfd br0 1
brctl addif br0 eth0 2>/dev/null
ifconfig br0 192.168.1.1
ifconfig eth0 0.0.0.0 up
ifconfig eth1 0.0.0.0
ifconfig ptm0 up
;;
acc_yes)
echo "Enter PPA mode"
insmod /lib/modules/2.6.20.19/ifxmips_ppa_datapath_vr9_e5.ko
insmod /lib/modules/2.6.20.19/ifxmips_ppa_hal_vr9_e5.ko
insmod /lib/modules/2.6.20.19/ifx_ppa_api.ko
insmod /lib/modules/2.6.20.19/ifx_ppa_api_proc.ko
brctl setfd br0 1
brctl addif br0 eth0
/sbin/ifconfig eth1 0.0.0.0 down
/sbin/ifconfig eth0 0.0.0.0 up
ifconfig br0 192.168.1.1
sleep 3
echo enable > /proc/ppa/api/hook
sleep 1
ppacmd control --enable-lan --enable-wan
ppacmd addlan -i eth0
ppacmd addlan -i br0
ifconfig ptm0 2> /dev/null
if [ $? = "0" ] ; then
ppacmd addwan -i ptm0
ifconfig ptm0 up
fi
;;
*)
echo "Please specific acc_no/acc_yes"
exit 1
esac
if [ -s /opt/lantiq/bin/dsl_cpe_pipe.sh ] ; then
/opt/lantiq/bin/dsl_cpe_pipe.sh g997xtusecs 0 0 0 0 0 0 0 7
/opt/lantiq/bin/dsl_cpe_pipe.sh acs 2
fi
;;
adsl_a)
echo "Setting in flash is ADSL Annex A mode"
case "$ACCELERATION" in
acc_no)
cat /proc/modules|grep ifxmips_atm
insmod /lib/modules/2.6.20.19/ifxmips_atm.ko
/opt/lantiq/bin/dsl_cpe_pipe.sh g997xtusecs 5 0 4 0 C 1 0 0
/opt/lantiq/bin/dsl_cpe_pipe.sh acs 2
brctl setfd br0 1
brctl addif br0 eth0 2>/dev/null
ifconfig br0 192.168.1.1
ifconfig eth0 0.0.0.0 up
ifconfig eth1 0.0.0.0 down
/flash/BSP-Test-VR9/bringup_wan_pvc.sh 0 0 33 01:20:23:43:53:33
sleep 1
ifconfig nas0 up
;;
acc_yes)
insmod /lib/modules/2.6.20.19/ifxmips_ppa_datapath_vr9_a5.ko
insmod /lib/modules/2.6.20.19/ifxmips_ppa_hal_vr9_a5.ko
insmod /lib/modules/2.6.20.19/ifx_ppa_api.ko
insmod /lib/modules/2.6.20.19/ifx_ppa_api_proc.ko
/opt/lantiq/bin/dsl_cpe_pipe.sh g997xtusecs 5 0 4 0 C 1 0 0
/opt/lantiq/bin/dsl_cpe_pipe.sh acs 2
brctl setfd br0 1
brctl addif br0 eth0 2>/dev/null
ifconfig br0 192.168.1.1
ifconfig eth0 0.0.0.0 up
ifconfig eth1 0.0.0.0 down
/flash/BSP-Test-VR9/bringup_wan_pvc.sh 0 0 33 01:20:23:43:53:33
sleep 1
echo enable > /proc/ppa/api/hook
sleep 1
ppacmd control --enable-lan --enable-wan
ppacmd addlan -i eth0
ppacmd addlan -i br0
ifconfig nas0 2> /dev/null
if [ $? = "0" ] ; then
ppacmd addwan -i nas0
ifconfig nas0 up
fi
;;
*)
echo "Please specific acc_no/acc_yes"
exit 1
esac
;;
adsl_b)
echo "Setting in flash is ADSL Annex B mode"
case "$ACCELERATION" in
acc_no)
cat /proc/modules|grep ifxmips_atm
insmod /lib/modules/2.6.20.19/ifxmips_atm.ko
/opt/lantiq/bin/dsl_cpe_pipe.sh g997xtusecs 10 0 10 0 0 4 0 0
/opt/lantiq/bin/dsl_cpe_pipe.sh acs 2
brctl setfd br0 1
brctl addif br0 eth0 2>/dev/null
ifconfig br0 192.168.1.1
ifconfig eth0 0.0.0.0 up
ifconfig eth1 0.0.0.0 down
/flash/BSP-Test-VR9/bringup_wan_pvc.sh 0 0 33 01:20:23:43:53:33
sleep 1
ifconfig nas0 up
;;
acc_yes)
insmod /lib/modules/2.6.20.19/ifxmips_ppa_datapath_vr9_a5.ko
insmod /lib/modules/2.6.20.19/ifxmips_ppa_hal_vr9_a5.ko
insmod /lib/modules/2.6.20.19/ifx_ppa_api.ko
insmod /lib/modules/2.6.20.19/ifx_ppa_api_proc.ko
/opt/lantiq/bin/dsl_cpe_pipe.sh g997xtusecs 10 0 10 0 0 4 0 0
/opt/lantiq/bin/dsl_cpe_pipe.sh acs 2
brctl setfd br0 1
brctl addif br0 eth0 2>/dev/null
ifconfig br0 192.168.1.1
ifconfig eth0 0.0.0.0 up
ifconfig eth1 0.0.0.0 down
/flash/BSP-Test-VR9/bringup_wan_pvc.sh 0 0 33 01:20:23:43:53:33
sleep 1
echo enable > /proc/ppa/api/hook
sleep 1
ppacmd control --enable-lan --enable-wan
ppacmd addlan -i eth0
ppacmd addlan -i br0
ifconfig nas0 2> /dev/null
if [ $? = "0" ] ; then
ppacmd addwan -i nas0
ifconfig nas0 up
fi
;;
*)
echo "Please specific acc_no/acc_yes"
exit 1
esac
;;
eth_wan)
echo "Setting in flash is Ethernet WAN mode"
insmod /lib/modules/2.6.20.19/ifxmips_ppa_datapath_vr9_d5.ko
insmod /lib/modules/2.6.20.19/ifxmips_ppa_hal_vr9_d5.ko
insmod /lib/modules/2.6.20.19/ifx_ppa_api.ko
insmod /lib/modules/2.6.20.19/ifx_ppa_api_proc.ko
brctl setfd br0 1
brctl addif br0 eth0
/sbin/ifconfig eth0 0.0.0.0 up
/sbin/ifconfig eth1 0.0.0.0 up
ifconfig br0 192.168.1.1
sleep 3
echo enable > /proc/ppa/api/hook
sleep 1
ppacmd control --enable-lan --enable-wan
ppacmd addlan -i eth0
ppacmd addlan -i br0
ppacmd addwan -i eth1
;;
*)
echo "No mode was found"
exit 1
esac
exit 0
| kbridgers/VOLTE4GFAX | target/linux/ltqcpe/platform_vr9/base-files/ramdisk_copy/flash/BSP-Test-VR9/bringup_xdsl_mode.sh | Shell | gpl-2.0 | 7,179 |
function selectDistro() {
local distros # The array of distro names
local distroSelected # The name of the distro the user selects
local distroName
# Add a custom 'distro'
distros+=("Local File")
while read -r distroName; do
distroName="${distroName/images/}"
distroName="${distroName///}"
distros+=("$distroName")
done < <(ls -1d images/*/)
echo "Select distro: " >&2
select opt in "${distros[@]}"; do
distroSelected="${distros[$(($REPLY-1))]}"
if [[ "$distroSelected" = "" ]]; then
echo "Invalid selection" >&2
else
break
fi
done
echo "$distroSelected"
}
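# Usage sketch (illustrative): prompts go to stderr and the chosen value is
# echoed on stdout, so these functions compose with command substitution:
#   distro="$(selectDistro)"
#   version="$(selectDistroVersion "$distro")"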
function selectDistroVersion() {
local distroName="$1" # Which distro to get the versions for
local distroVersions # The array of versions available for this distro
local distroVersion
local distroVersionSelected
echo "Select $distroName version:" >&2
while read -r distroVersion; do
distroVersion="${distroVersion/images\/$distroName/}"
distroVersion="${distroVersion///}"
distroVersions+=("$distroVersion")
done < <(ls -1d images/"$distroName"/*/)
select opt in "${distroVersions[@]}"; do
distroVersionSelected="${distroVersions[$(($REPLY-1))]}"
if [[ "$distroVersionSelected" = "" ]]; then
echo "Invalid selection" >&2
else
break
fi
done
echo "$distroVersionSelected"
}
| moebrowne/RPi-image-manager | utils/select.sh | Shell | gpl-2.0 | 1,488 |
#!/bin/bash
# ntsk13: build script for the forlinux kernel
export PATH=/usr/local/arm/arm-none-linux-gnueabi/bin:$PATH
make clean
make | tee build.log
| NTSK13/forlinux | build_kernel.sh | Shell | gpl-2.0 | 143 |
#!/bin/bash
MONGOOSE_TGZ=mongooseim.tar.gz
BUILD_PATH=_build/prod/rel/mongooseim
tar czh --transform="s,${BUILD_PATH},mongooseim,S" -f $MONGOOSE_TGZ ${BUILD_PATH}
export BUILDS=`pwd`
# We use the output of generate_vsn because it contains no illegal characters,
# returns the git tag when building from the tag itself, and is unique in any other case
VERSION=`tools/generate_vsn.sh`
DOCKERHUB_TAG=${VERSION}
GIT_REF=`git rev-parse HEAD`
GIT_COMMIT_MSG=`git log --format=%B -n 1 HEAD`
if [ -n "$CIRCLE_PULL_REQUEST" ]; then
# CircleCI doesn't provide PR number in env. var., so we need to extract it from PR URL
# May not work with different service than GitHub
# TODO: Possibly change it to something else during Tide integration
PR_NUMBER=${CIRCLE_PULL_REQUEST##*/}
DOCKERHUB_TAG="PR-${PR_NUMBER}"
elif [ "${CIRCLE_BRANCH}" == 'master' ]; then
DOCKERHUB_TAG="latest";
fi
# TODO: Add DOCKERHUB=${VERSION} when CircleCI handles weekly builds as well
echo "Tag: ${DOCKERHUB_TAG}"
IMAGE_TAG=${DOCKERHUB_REPO}/mongooseim:${DOCKERHUB_TAG}
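# For illustration: with DOCKERHUB_REPO=esl this yields e.g.
# esl/mongooseim:PR-1234, esl/mongooseim:latest, or esl/mongooseim:<version>.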
git clone https://github.com/esl/mongooseim-docker.git
cd mongooseim-docker
git checkout 318e1ee6582e7473303a2cd0b4baca0c9c09a1be
cp ../${MONGOOSE_TGZ} member
docker build -f Dockerfile.member -t ${IMAGE_TAG} \
--build-arg BUILD_DATE=`date -u +"%Y-%m-%dT%H:%M:%SZ"` \
--build-arg VCS_REF=${GIT_REF} \
--build-arg VCS_REF_DESC="${GIT_COMMIT_MSG}" \
--build-arg VERSION=${VERSION} \
.
docker login -u ${DOCKERHUB_USER} -p ${DOCKERHUB_PASS}
docker push ${IMAGE_TAG}
| arcusfelis/MongooseIM | tools/circle-build-and-push-docker.sh | Shell | gpl-2.0 | 1,564 |
#!/bin/bash
[[ -z "$__main__" || "$__main__" != 'CenTwe.sh' ]] && { echo 'Can not run this file directly'; exit 1; }
# TODO:
# 1) Use local variables in functions consistently
# 2)
# ------------------------------------------------------------------------------
_iam=`whoami`
_scriptis="$1"
_username="$2"
_osarch=`arch`
# Some websites block wget, so we're going to fool them ;P
_uagent="Mozilla/5.0 (X11; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0"
declare -A imported plugin_info
declare plugin_type plugin_name
# ------------------------------------------------------------------------------
# Main function
function init.main() {
# Checking whether it's ok or not ok to proceed
init.chk_user
init.chk_deps
# Start loading the needed things
init.mktemp
init.plugins
# Start the main script
init.shell
}
# ------------------------------------------------------------------------------
# Check whether it is good or not to proceed
function init.chk_user() {
if [[ ("$_iam" == "root") && ("$_scriptis" != 'safe2run' || -z "$_username") ]]; then
echo 'You should run it as a normal user!'
echo 'Exiting..'; __q 1
elif [[ "$_iam" == "root" && "$_scriptis" == 'safe2run' && -n "$_username" ]]; then
#if id -u "$_username" >/dev/null 2>&1; then
# OR
if [[ `grep -c "^$_username:" /etc/passwd` == 0 ]]; then
echo "User: $_username doesn't exist !!"
echo 'Exiting..'; __q 1
fi
else
su -c "bash $__file__ 'safe2run' '$_iam'"
__q 0 'silent'
fi
}
# ------------------------------------------------------------------------------
# How should we proceed?
function init.chk_deps() {
# Preparing the needed variables
local vDepsList vAllPkgs vDPkg vOpts vOpt
echo '[Deps]: Checking the required dependencies..'
vDepsList=('newt' 'wget')
vAllPkgs=`rpm -qa`
# Do we have the required dependencies
for vDPkg in ${vDepsList[@]}; do
if [[ -z `echo "$vAllPkgs" | grep -i "^${vDPkg}-[0-9*]"` ]]; then
echo "[Deps]: Missing dependency '$vDPkg'"
vOpts=("Install '$vDPkg' package." "Cancel and exit.")
PS3="What would you like to do? "
select vOpt in "${vOpts[@]}"; do
case "$REPLY" in
1 ) yum.upin $vDPkg; break;;
2 ) echo "Exiting..!"; __q 1; break;;
* ) echo "Invalid option !"; continue;;
esac
done
init.chk_deps
return 0 # Avoiding the double looping for the same package
else
echo "[Deps]: '$vDPkg' is available."
fi
done
# Flushing the used variables
unset vDepsList vAllPkgs vDPkg vOpts vOpt
}
# ------------------------------------------------------------------------------
# Making & checking the existence of the temp dir we're going to use
function init.mktemp() {
[[ -z "$__tmpd__" || ! -d "$__tmpd__" ]] && __tmpd__=`mktemp -d`
[[ `pwd -P` != "$__tmpd__" ]] && cd "$__tmpd__"
}
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Loading available plugins
# TODO: Scan & load user plugins first, which they should be under:
# '~/.centwe_plugins/{applications,tweaks,fixes,repos}'
function init.plugins() {
# Preparing the needed variables
local plugin type
declare -A plugins=([app]="$__apps__" [twk]="$__twks__" [fix]="$__fixs__" [repo]="$__repo__")
# DO NOT declare with assigning initial values to a global associative array
# (it's a bug in Bash 4.2. See: http://stackoverflow.com/a/21151984)
# g: global, A: associative array
# declare -gA imported
# plugins=([app]="$__apps__" [twk]="$__twks__" [fix]="$__fixs__" [repo]="$__repo__")
for type in "${!plugins[@]}"; do
for plugin in "${plugins[$type]}/"*; do
plugin=`basename "$plugin"`;
if [[ "$plugin" != *' '* && -f "${plugins[$type]}/$plugin/run.sh" ]]; then
source "${plugins[$type]}/$plugin/run.sh"
imported[$type/$plugin]=0
fi
done
done
# Flushing the used variables
unset plugin type plugins
}
# ------------------------------------------------------------------------------
# The main shell, which will read & execute all the inserted commands by user
function init.shell() {
local cmd
while [[ true ]]; do
# echo -n "[root@CenTwe ]$ "
echo -n "CenTwe> "
# The words are assigned to sequential indexes of the array variable
# ANAME, starting at 0. All elements are removed from ANAME before the
# assignment. Other NAME arguments are ignored.
# Check: http://tldp.org/LDP/Bash-Beginners-Guide/html/sect_08_02.html
read -a cmd
case ${cmd[0]} in
app) cmd.app ${cmd[@]:1};;
twk) cmd.twk ${cmd[@]:1};;
fix) cmd.fix ${cmd[@]:1};;
repo) cmd.repo ${cmd[@]:1};;
help) cmd.help ${cmd[1]};;
clear) clear;;
quit|exit)
echo 'Goodbye!'; __q 0; break;;
*) echo 'Invalid command. Try: help'; continue;;
esac
done
}
# ------------------------------------------------------------------------------
# TODO: not finished or tested yet
function cmd.app() {
# Preparing the needed variables
local appname appfuncin
declare -a appnames=(${@:2})
case $1 in
help)
cmd.help "app"
;;
test)
[[ `type -t app.lighttable_install` == 'function' ]] && echo 'function exists' || echo 'function does not exist'
echo 'args:'
for appname in ${!appnames[@]}; do
echo "$appname : ${appnames[$appname]}"
done
# echo "appnames : $appnames"
# echo "@ : $@"
echo 'Imported:'
for f in ${!imported[@]}; do
echo "${imported[$f]} <---> $f"
done
;;
list|ls)
echo 'show available apps to be installed'
;;
install|in)
echo 'Installing the app now'
for appname in ${appnames[@]}; do
appfuncin="app.${appname}_install"
# echo "appfuncin : $appfuncin"
# echo "!appfuncin: $!appfuncin"
[[ `is_function $appfuncin` == 0 ]] && echo "$appname->$appfuncin"
done
# if [[ "$_app_status_lighttable" == "available" ]]; then
# echo -n "[APP]: Are you sure you want to install this app? [y/N]: "
# read _vyesno
# [[ "$_vyesno" == [Yy] ]] && app.lighttable_install || continue
# else
# echo "[APP]: 'Light Table' is already installed."
# fi
;;
remove|rm)
echo 'Removing the app now'
# if [[ "$_app_status_lighttable" == "installed" ]]; then
# echo -n "[APP]: Are you sure you want to remove this app? [y/N]: "
# read _vyesno
# [[ "$_vyesno" == [Yy] ]] && app.lighttable_remove || continue
# else
# echo "[APP]: 'Light Table' is not installed."
# fi
;;
update|up)
echo 'Updating the app now'
# app.lighttable_update
;;
info)
echo 'Showing app info now'
# $2 : optional argument ('force-reload')
# if it was provided, then we should re-setting app infos.
# app.lighttable_info_get $2
;;
*) continue;;
esac
}
# ------------------------------------------------------------------------------
# TODO: code it
function cmd.twk() {
return 0
}
# ------------------------------------------------------------------------------
# TODO: code it
function cmd.fix() {
return 0
}
# ------------------------------------------------------------------------------
# TODO: code it
function cmd.repo() {
return 0
}
# ------------------------------------------------------------------------------
# Help function
function cmd.help() {
local help_file='help'
[[ -n $1 ]] && help_file+=".${1}"
help_file="$__core__/${help_file}.txt"
[[ -f "$help_file" ]] || help_file="$__core__/help.txt"
cat "$help_file"
}
# ------------------------------------------------------------------------------
# should always be used before setting or getting any value.
# $1 : type - $2 : name
function plugin.use() {
[[ -n $1 ]] && plugin_type=$1
[[ -n $2 ]] && plugin_name=$2
}
# ------------------------------------------------------------------------------
# $1 : key - $2 : value
function plugin.set() {
plugin_info[$plugin_type/$plugin_name.$1]=$2
}
# ------------------------------------------------------------------------------
function plugin.get() {
local ret=${plugin_info[$plugin_type/$plugin_name.$1]}
[[ -n "$ret" ]] && echo "$ret"
}
# ------------------------------------------------------------------------------
function plugin.need2update() {
__need2update `plugin.get 'verlocal'` `plugin.get 'veronline'`
}
# ------------------------------------------------------------------------------
function plugin.check_update() {
# This is independent
# local verlocal=`plugin.get 'verlocal'`
# local veronline=`plugin.get 'veronline'`
# if [[ `__need2update $verlocal $veronline` == 0 ]]; then
# plugin.set 'update' 'available'
# else
# plugin.set 'update' 'unavailable'
# fi
# This depends on 'plugin.need2update'
# May be we should delete 'plugin.need2update' and use previous commented code.
if [[ `plugin.need2update` == 0 ]]; then
plugin.set 'update' 'available'
else
plugin.set 'update' 'unavailable'
fi
}
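# Example usage (hypothetical plugin shown for illustration):
#   plugin.use 'app' 'sublime'
#   plugin.set 'verlocal' '1.0.0'
#   plugin.set 'veronline' '1.0.1'
#   plugin.check_update      # plugin.get 'update' -> 'available'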
# ------------------------------------------------------------------------------
function is_function() {
[[ -n $1 && `type -t $1` == 'function' ]] && echo 0 || echo 1
}
# ------------------------------------------------------------------------------
# Custom exit function
function __q() {
[[ $2 != 'silent' ]] && echo 'Cleaning up the generated temporary files..'
[[ -d "$__tmpd__" ]] && rm -rfv "$__tmpd__"
exit $1
}
# ------------------------------------------------------------------------------
# Aborting function: Should abort the sub-script process, if anything goes wrong
# TODO: find a way to make this abort only the calling function
# instead of exiting the whole script.
function __a() {
echo "[APORTING]: $1"
return 1
}
# ------------------------------------------------------------------------------
# URL: http://stackoverflow.com/a/4025065
# We should not call it directly, instead we should use `__need2update $1 $2`
function __ver_comp () {
if [[ $1 == $2 ]]; then
return 0
fi
local OIFS=$IFS
local IFS=.
local i ver1=($1) ver2=($2)
# fill empty fields in ver1 with zeros
for ((i=${#ver1[@]}; i<${#ver2[@]}; i++)); do
ver1[i]=0
done
for ((i=0; i<${#ver1[@]}; i++)); do
if [[ -z ${ver2[i]} ]]; then
# fill empty fields in ver2 with zeros
ver2[i]=0
fi
if ((10#${ver1[i]} > 10#${ver2[i]})); then
IFS=$OIFS
return 1
fi
if ((10#${ver1[i]} < 10#${ver2[i]})); then
IFS=$OIFS
return 2
fi
done
return 0
}
# ------------------------------------------------------------------------------
# Check whether we need to update or not
# return type : echo
# return value : 0|1
# 0: yes
# 1: no
# usage __need2update local_version online_version
# ex: [[ `__need2update 1.0.1 1.0.2` == 0 ]] && echo 'need to update' || echo 'no need to update'
function __need2update() {
# $1 : installed version
# $2 : available version
__ver_comp $1 $2
[[ $? == 2 ]] && echo 0 || echo 1
}
# ------------------------------------------------------------------------------
function yum.up() {
yum -y update
}
# ------------------------------------------------------------------------------
function yum.in() {
# $* : All of the positional parameters, seen as a single word
# $@ : Same as $*, but each parameter is a quoted string, that is, the
# parameters are passed on intact, without interpretation or expansion.
# This means, among other things, that each parameter in the argument
# list is seen as a separate word.
# URL: http://www.tldp.org/LDP/abs/html/internalvariables.html#APPREF
yum -y install $@
}
# ------------------------------------------------------------------------------
function yum.upin() {
yum.up
yum.in $@
}
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Maybe we should delete the following functions, since they are not being used.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# echo the value of the appArrayKey
# usage: app_info app-name array-key (ex: app_info sublime2 ver_local)
function app_info() {
local app=$1
local key=$2
local ret=${app}[$key]
if [[ -n ${!ret} ]]; then
echo ${!ret}
else
# TODO: check if "$app" in plugins_array
if [[ -z $3 ]]; then
app.${app}_info_set
app_info $app $key 'avoid_loop'
fi
fi
}
# ------------------------------------------------------------------------------
# Called as: __in_array "keyword" "${array_name[@]}"
function __in_array() {
local e
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
return 1 # not found
}
# ------------------------------------------------------------------------------
# TODO: make this work
function __is_array() {
echo ''
#declare -p non-existing-var 2> /dev/null | grep -q '^declare \-a' && echo array || echo no array
}
# ------------------------------------------------------------------------------
# called: __write_file "contents" "path/to/file"
# TODO: make sure file is writable.
# TODO: if file doesn't exist, should we create it ?
function __write_file() {
if [[ -n $1 && -f $2 ]]; then
# Removing all leading and trailing whitespace and tabs
echo "$1" | sed -e 's/^[ \t]*//' > "$2"
else
echo 'Missing contents Or file does not exist'
echo 'Exiting..'; __q 1
fi
}
# ------------------------------------------------------------------------------
| DeaDSouL/CenTwe | core/core.sh | Shell | gpl-2.0 | 15,573 |
#!/bin/bash
i=1
mkdir saidas
# Run the program over every input file
for arquivo in entradas/*.txt
do
python principal.py $arquivo saidas/saida_$i.txt;
i=$((i+1))
done | celsocelante/trabgrafos | executa.sh | Shell | gpl-2.0 | 186 |
#!/usr/bin/env bash
THIS_DIR=$(cd $(dirname $0); pwd)
cd $THIS_DIR
update() {
git pull
git submodule update --init --recursive
install_rocks
}
# Will install luarocks on THIS_DIR/.luarocks
install_luarocks() {
git clone https://github.com/keplerproject/luarocks.git
cd luarocks
git checkout tags/v2.2.1 # Current stable
PREFIX="$THIS_DIR/.luarocks"
./configure --prefix=$PREFIX --sysconfdir=$PREFIX/luarocks --force-config
RET=$?; if [ $RET -ne 0 ];
then echo "Error. Exiting."; exit $RET;
fi
make build && make install
RET=$?; if [ $RET -ne 0 ];
then echo "Error. Exiting.";exit $RET;
fi
cd ..
rm -rf luarocks
}
install_rocks() {
    # $rock is intentionally left unquoted below so that pinned entries like
    # "lbase64 20120807-3" split into a package name and a version argument.
    local rock
    for rock in luasec "lbase64 20120807-3" luasocket oauth redis-lua \
        lua-cjson fakeredis xml feedparser serpent; do
        ./.luarocks/bin/luarocks install $rock
        RET=$?; if [ $RET -ne 0 ];
        then echo "Error. Exiting."; exit $RET;
        fi
    done
}
install() {
git pull
git submodule update --init --recursive
patch -i "patches/disable-python-and-libjansson.patch" -p 0 --batch --forward
cp patches/structures.c-area31 tg/tgl/structures.c
RET=$?;
cd tg
if [ $RET -ne 0 ]; then
autoconf -i
fi
./configure && make
RET=$?; if [ $RET -ne 0 ]; then
echo "Error. Exiting."; exit $RET;
fi
cd ..
install_luarocks
install_rocks
}
if [ "$1" = "install" ]; then
install
elif [ "$1" = "update" ]; then
update
else
if [ ! -f ./tg/telegram.h ]; then
echo "tg not found"
echo "Run $0 install"
exit 1
fi
if [ ! -f ./tg/bin/telegram-cli ]; then
echo "tg binary not found"
echo "Run $0 install"
exit 1
fi
./tg/bin/telegram-cli -k ./tg/tg-server.pub -s ./bot/bot.lua -l 1 -E $@
fi
| area31/telegram-bot | launch.sh | Shell | gpl-2.0 | 2,707 |
#!/bin/bash
#
# Do full runs (plan + run) for both credit and credit2 schedulers
#
# Usage:
# ./schedbench.sh [n]
#
# Where [n] is the run number
#
if [[ -n "$1" ]] ; then
run="$1"
else
run="test"
fi
file="${run}.bench"
if ! [[ -e $file ]] ; then
echo "Can't find benchmark file $file"
exit 1
fi
./schedbench -f ${file} plan || exit 1
for sched in credit2 credit; do
prefix="${run}.${sched}"
echo "$1 Testing $sched (prefix $prefix)"
./schedbench-prep.sh $sched || break
xl cpupool-list schedbench
(./schedbench -f ${file} run | tee ${prefix}.log) || break
done
grep early ${run}*.log
./schedbench -f ${file} -v 1 report > ${run}.txt || exit 1
| gwd/schedbench | scripts/schedbench.sh | Shell | gpl-2.0 | 670 |
#!/bin/sh
../configure --prefix=/usr CC=gcc-4.0 --with-tls --disable-profile CFLAGS="-pipe -fstrict-aliasing -g1 -O3"
| mseaborn/plash-glibc | myconfig.sh | Shell | gpl-2.0 | 119 |
function clean_docker() {
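    # WARNING: destructive -- removes dangling images, then ALL remaining
    # images, then every container in "exited" status on this host.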
docker rmi -f $(docker images -f "dangling=true" -q)
docker rmi -f `docker images -aq`
docker rm $(docker ps --filter status=exited --quiet)
# remove exited containers:
#docker ps --filter status=dead --filter status=exited -aq | xargs -r docker rm -v
# remove unused images:
#docker images --no-trunc | grep '<none>' | awk '{ print $3 }' | xargs -r docker rmi
# remove unused volumes:
#find '/var/lib/docker/volumes/' -mindepth 1 -maxdepth 1 -type d | grep -vFf <(
# docker ps -aq | xargs docker inspect | jq -r '.[] | .Mounts | .[] | .Name | select(.)'
#) | xargs -r rm -fr
}
| sasubillis/linux-scripts | docker_clean.sh | Shell | gpl-2.0 | 609 |
#!/bin/sh
#
# Control bar LEDs
#
# (c) 2014 Teltonika
. /lib/led_functions.sh
show_options()
{
printf "usage: $0 <number of leds>\n"
}
blink_leds()
{
echo "timer" > "$LED_PATH/$led0/trigger"
echo "timer" > "$LED_PATH/$led1/trigger"
echo "timer" > "$LED_PATH/$led2/trigger"
echo "timer" > "$LED_PATH/$led3/trigger"
echo "timer" > "$LED_PATH/$led4/trigger"
[ "$1" = "red" ] && echo "timer" > "$LED_PATH/$ledr/trigger"
[ "$1" = "green" ] && echo "timer" > "$LED_PATH/$ledg/trigger"
exit 0
}
if [ -n "$1" ] && [ "$1" != "blink" ] && [ "$1" -gt 4 ]
then
show_options
exit 1
fi
all_init
all_off
led_off $ledr
led_off $ledg
[ -z "$1" ] && exit 0
[ "$1" = "blink" ] && blink_leds $2
[ "$1" -ge 0 ] && led_on $led0
[ "$1" -ge 1 ] && led_on $led1
[ "$1" -ge 2 ] && led_on $led2
[ "$1" -ge 3 ] && led_on $led3
[ "$1" -ge 4 ] && led_on $led4
exit 0
| ingran/balzac | package/base-files/files/usr/sbin/ledbar.sh | Shell | gpl-2.0 | 861 |
#!/bin/bash
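# Project Euler #2: sum the even-valued Fibonacci terms that do not
# exceed 4,000,000. Negative array indices (${fibos[-1]}) need bash 4.3+.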
fibos=( 1 2 )
last=2
sum=2
while [[ $last -le 4000000 ]]
do
last=$((${fibos[-1]}+${fibos[-2]}))
fibos+=($last)
if [[ $(($last % 2)) -eq 0 ]]; then
sum=$(($sum+$last))
fi
done
echo "ANSWER:$sum"
| chicks-net/euler-answers-chicks | problem002/answer002.sh | Shell | gpl-2.0 | 215 |
#!/bin/bash
#
# Copyright (c) 2018 SUSE LINUX GmbH, Nuernberg, Germany.
umask 022
PATH=/sbin:/bin:/usr/sbin:/usr/bin
export PATH
if [ -f /etc/sysconfig/btrfsmaintenance ] ; then
. /etc/sysconfig/btrfsmaintenance
fi
if [ -f /etc/default/btrfsmaintenance ] ; then
. /etc/default/btrfsmaintenance
fi
LOGIDENTIFIER='btrfs-defrag'
. $(dirname $(realpath "$0"))/btrfsmaintenance-functions
{
OIFS="$IFS"
IFS=:
exec 2>&1 # redirect stderr to stdout to catch all output to log destination
for P in $BTRFS_DEFRAG_PATHS; do
IFS="$OIFS"
if ! is_btrfs "$P"; then
echo "Path $P is not btrfs, skipping"
continue
fi
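    # Defragment regular files above the configured size threshold;
    # -xdev keeps find from descending into other mounted filesystems.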
find "$P" -xdev -size "$BTRFS_DEFRAG_MIN_SIZE" -type f \
-exec btrfs filesystem defrag -t 32m -f $BTRFS_VERBOSITY '{}' \;
done
} | \
case "$BTRFS_LOG_OUTPUT" in
stdout) cat;;
journal) systemd-cat -t "$LOGIDENTIFIER";;
syslog) logger -t "$LOGIDENTIFIER";;
none) cat >/dev/null;;
*) cat;;
esac
exit 0
| kdave/btrfsmaintenance | btrfs-defrag.sh | Shell | gpl-2.0 | 929 |
#
# Setup videocore - Raspberry Userland
#
# Load utility functions
. ./functions.sh
if [ "$ENABLE_VIDEOCORE" = true ] ; then
# Copy existing videocore sources into chroot directory
if [ -n "$VIDEOCORESRC_DIR" ] && [ -d "$VIDEOCORESRC_DIR" ] ; then
# Copy local videocore sources
cp -r "${VIDEOCORESRC_DIR}" "${R}/tmp/userland"
else
# Create temporary directory for videocore sources
temp_dir=$(as_nobody mktemp -d)
# Fetch videocore sources
as_nobody git -C "${temp_dir}" clone "${VIDEOCORE_URL}"
# Copy downloaded videocore sources
mv "${temp_dir}/userland" "${R}/tmp/"
	# Set permissions of the videocore sources
chown -R root:root "${R}/tmp/userland"
	# Remove temporary directory for videocore sources
rm -fr "${temp_dir}"
fi
# Create build dir
mkdir "${R}"/tmp/userland/build
# Change into the build directory
cd "${R}"/tmp/userland/build
if [ "$RELEASE_ARCH" = "arm64" ] ; then
cmake -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_BUILD_TYPE=release -DCMAKE_TOOLCHAIN_FILE="${R}"/tmp/userland/makefiles/cmake/toolchains/aarch64-linux-gnu.cmake -DARM64=ON -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++ -DCMAKE_ASM_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_C_FLAGS="${CMAKE_C_FLAGS} -U_FORTIFY_SOURCE" -DCMAKE_ASM_FLAGS="${CMAKE_ASM_FLAGS} -c" -DVIDEOCORE_BUILD_DIR="${R}" "${R}/tmp/userland"
fi
if [ "$RELEASE_ARCH" = "armel" ] ; then
cmake -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_BUILD_TYPE=release -DCMAKE_TOOLCHAIN_FILE="${R}"/tmp/userland/makefiles/cmake/toolchains/arm-linux-gnueabihf.cmake -DCMAKE_C_COMPILER=arm-linux-gnueabi-gcc -DCMAKE_CXX_COMPILER=arm-linux-gnueabi-g++ -DCMAKE_ASM_COMPILER=arm-linux-gnueabi-gcc -DCMAKE_C_FLAGS="${CMAKE_C_FLAGS} -U_FORTIFY_SOURCE" -DCMAKE_ASM_FLAGS="${CMAKE_ASM_FLAGS} -c" -DCMAKE_SYSTEM_PROCESSOR="arm" -DVIDEOCORE_BUILD_DIR="${R}" "${R}/tmp/userland"
fi
if [ "$RELEASE_ARCH" = "armhf" ] ; then
cmake -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_BUILD_TYPE=release -DCMAKE_TOOLCHAIN_FILE="${R}"/tmp/userland/makefiles/cmake/toolchains/arm-linux-gnueabihf.cmake -DVIDEOCORE_BUILD_DIR="${R}" "${R}/tmp/userland"
fi
# Build userland
make -j "$(nproc)"
# Back to the root of the script directory
cd "${WORKDIR}"
# Remove videocore sources
rm -fr "${R}"/tmp/userland/
fi
| drtyhlpr/rpi23-gen-image | bootstrap.d/43-videocore.sh | Shell | gpl-2.0 | 2,305 |
#!/bin/bash
# In The Name of God
# ========================================
# [] File Name : alacritty.sh
#
# [] Creation Date : 18-11-2020
#
# [] Created By : Parham Alvani <[email protected]>
# =======================================
usage() {
echo -n "alacritty terminal with jetbrains mono font and configuration"
# shellcheck disable=2016
echo '
_ _ _ _
__ _| | __ _ ___ _ __(_) |_| |_ _ _
/ _` | |/ _` |/ __| |__| | __| __| | | |
| (_| | | (_| | (__| | | | |_| |_| |_| |
\__,_|_|\__,_|\___|_| |_|\__|\__|\__, |
|___/
'
}
main_brew() {
brew install --cask alacritty
}
main_pacman() {
sudo pacman -Syu --noconfirm --needed alacritty
}
main_apt() {
msg "there is nothing that we can do"
return 1
}
main() {
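  # configfile is assumed to come from the surrounding dotfiles framework;
  # it links this repository's alacritty configuration into place.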
configfile alacritty
}
| 1995parham/dotfiles | scripts/alacritty.sh | Shell | gpl-2.0 | 822 |
#!/bin/bash
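# Package the pre-compiled UFM classes and resources into an executable jar
# (assumes the com/itdominator/... class files have already been built).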
function main() {
jar cvfm UFM.jar manifest.txt com/itdominator/ufm/*.class \
com/itdominator/ufm/TabSystem \
com/itdominator/ufm/ThumbnailSystem \
com/itdominator/ufm/Utils \
com/itdominator/ufm/resources
chmod +x UFM.jar
}
main;
| maximstewart/UDE | UFM/buildJar.sh | Shell | gpl-2.0 | 388 |
#!/bin/bash
### Description: Start ssh service in container
### Written by: Alex Yin - [email protected] on 11-06-2015
__create_user() {
  # Create a user to SSH in as.
useradd admin
SSH_USERPASS=abcd.9527
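  # NOTE: the password is hardcoded for demo images; override or randomize
  # it before using this container anywhere that matters.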
echo -e "$SSH_USERPASS\n$SSH_USERPASS" | (passwd --stdin admin)
echo ssh user password: $SSH_USERPASS
}
# Call all functions
__create_user
| mooncreeks/Centos-Dockerfiles | ssh/centos7/start.sh | Shell | gpl-2.0 | 349 |
#!/bin/bash
if [ -z "$1" ] ; then
echo input file missing
echo "Usage: $0 input.cu8 output.sr [sample rate in kHz]"
exit 1
fi
if [ ! -r "$1" ] ; then
echo input not found
echo "Usage: $0 input.cu8 output.sr [sample rate in kHz]"
exit 1
fi
file=$1
if [ -z "$2" ] ; then
echo output file missing
echo "Usage: $0 input.cu8 output.sr [sample rate in kHz]"
exit 1
fi
if [ -e "$2" ] ; then
echo output already exists
echo "Usage: $0 input.cu8 output.sr [sample rate in kHz]"
exit 1
fi
out=$2
if [ -z "$3" ] ; then
rate=250
else
rate=$3
fi
if [ ! -z "$4" ] ; then
echo too many arguments
echo "Usage: $0 input.cu8 output.sr [sample rate in kHz]"
exit 1
fi
# create channels
rtl_433 -s ${rate}k -r "$file" -w F32:I:analog-1-4-1 -w F32:Q:analog-1-5-1 -w F32:AM:analog-1-6-1 -w F32:FM:analog-1-7-1 -w U8:LOGIC:logic-1-1 >/dev/null 2>&1
# create version tag
echo -n "2" >version
# create meta data
cat >metadata <<EOF
[device 1]
capturefile=logic-1
total probes=3
samplerate=$rate kHz
total analog=4
probe1=FRAME
probe2=ASK
probe3=FSK
analog4=I
analog5=Q
analog6=AM
analog7=FM
unitsize=1
EOF
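# A sigrok session (.sr) file is just a zip archive of the version,
# metadata, and per-channel sample files assembled above.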
zip "$out" version metadata analog-1-4-1 analog-1-5-1 analog-1-6-1 analog-1-7-1 logic-1-1
rm version metadata analog-1-4-1 analog-1-5-1 analog-1-6-1 analog-1-7-1 logic-1-1
| zerog2k/rtl_433 | examples/sigrok-conv.sh | Shell | gpl-2.0 | 1,293 |
#! /bin/bash
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
set -e
if [ "${SAGE_AGE}" == "-1" ]; then
sudo add-apt-repository ppa:aims/sagemath -y
sudo apt-get update -qq
sudo apt-get install sagemath-upstream-binary -y
cd $HOME
mkdir -p SageMath
else
SAGE_IMAGE=`python2 -c "import sage_version; print sage_version.get_all_version_names('${SAGE_SERVER}index.html',${SAGE_AGE})"`
cd $HOME
echo "Obtaining Sage image:" ${SAGE_IMAGE}
if [ ! -x SageMath/sage ] ; then
rm -f SageMath.tar.bz2
wget --progress=dot:giga ${SAGE_SERVER}${SAGE_IMAGE} -O SageMath.tar.bz2
tar xf SageMath.tar.bz2
fi
MAKE="make -j4"
export MAKE
# Install packages
#SageMath/sage -i lrslib
# To initialize matplotlib font manager
$HOME/SageMath/sage -python -c 'import matplotlib.pyplot'
$HOME/SageMath/sage -pip install --user sphinxcontrib-websupport
fi
| jplab/brocoli | .travis-install.sh | Shell | gpl-2.0 | 1,061 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2013-2022 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Like t3067, but this time we have an input and output group,
# so the group_link table is used.
. ./tup.sh
mkdir foo
mkdir bar
cat > foo/Tupfile << HERE
: ../bar/<group> |> cp ../bar/file.txt %o |> copy.txt | ../bar/<output>
HERE
cat > bar/Tupfile << HERE
: |> echo hey > %o |> file.txt | <group>
HERE
update
# For some reason this issue takes several iterations to show up
for i in a b c; do
rm -rf bar
tup scan
mkdir bar
cat > bar/Tupfile << HERE
: |> echo hey > %o |> file.txt | <group>
HERE
update
done
eotup
| gittup/tup | test/t3078-group-rmdir3.sh | Shell | gpl-2.0 | 1,273 |
#!/bin/sh
# ***** BEGIN LICENSE BLOCK *****
# This file is part of Natron <http://www.natron.fr/>,
# Copyright (C) 2016 INRIA and Alexandre Gauthier
#
# Natron is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Natron is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Natron. If not, see <http://www.gnu.org/licenses/gpl-2.0.html>
# ***** END LICENSE BLOCK *****
# MKJOBS: Number of threads
# CONFIG=(debug,release,relwithdebinfo): the build type
# DISABLE_BREAKPAD=1: When set, automatic crash reporting (google-breakpad support) will be disabled
# PLUGINDIR: The path to the plug-ins in the final bundle, e.g: "$CWD/build/Natron/App/Natron.app/Contents/Plugins"
#Usage PLUGINDIR="..." MKJOBS=4 CONFIG=relwithdebinfo BRANCH=workshop ./build-plugins.sh
source `pwd`/common.sh || exit 1
cd $CWD/build || exit 1
#If "workshop" is passed, use master branch for all plug-ins otherwise use the git tags in common.sh
if [ "$BRANCH" = "workshop" ]; then
IO_BRANCH=master
MISC_BRANCH=master
ARENA_BRANCH=master
CV_BRANCH=master
else
IO_BRANCH=$IOPLUG_GIT_TAG
MISC_BRANCH=$MISCPLUG_GIT_TAG
ARENA_BRANCH=$ARENAPLUG_GIT_TAG
CV_BRANCH=$CVPLUG_GIT_TAG
fi
if [ ! -d "$PLUGINDIR" ]; then
echo "Error: plugin directory '$PLUGINDIR' does not exist"
exit 1
fi
#Build openfx-io
git clone $GIT_IO
cd openfx-io || exit 1
git checkout "$IO_BRANCH" || exit 1
git submodule update -i --recursive || exit 1
if [ "$IO_BRANCH" = "master" ]; then
# the snapshots are always built with the latest version of submodules
git submodule foreach git pull origin master
fi
#Always bump git commit, it is only used to version-stamp binaries
IO_GIT_VERSION=`git log|head -1|awk '{print $2}'`
sed -i "" -e "s/IOPLUG_DEVEL_GIT=.*/IOPLUG_DEVEL_GIT=${IO_GIT_VERSION}/" $CWD/commits-hash.sh || exit 1
make CXX="$CXX" BITS=$BITS CONFIG=$CONFIG OCIO_HOME=/opt/local OIIO_HOME=/opt/local SEEXPR_HOME=/opt/local -j${MKJOBS} || exit 1
cp -r IO/$OS-$BITS-$CONFIG/IO.ofx.bundle "$PLUGINDIR/OFX/Natron" || exit 1
cd ..
#Build openfx-misc
git clone $GIT_MISC
cd openfx-misc || exit 1
git checkout "$MISC_BRANCH" || exit 1
git submodule update -i --recursive || exit 1
if [ "$MISC_BRANCH" = "master" ]; then
# the snapshots are always built with the latest version of submodules
git submodule foreach git pull origin master
fi
#Always bump git commit, it is only used to version-stamp binaries
MISC_GIT_VERSION=`git log|head -1|awk '{print $2}'`
sed -i "" -e "s/MISCPLUG_DEVEL_GIT=.*/MISCPLUG_DEVEL_GIT=${MISC_GIT_VERSION}/" $CWD/commits-hash.sh || exit 1
make CXX="$CXX" BITS=$BITS CONFIG=$CONFIG -j${MKJOBS} HAVE_CIMG=0 || exit 1
cp -r Misc/$OS-$BITS-$CONFIG/Misc.ofx.bundle "$PLUGINDIR/OFX/Natron" || exit 1
make -C CImg CImg.h || exit 1
if [ "$COMPILER" = "gcc" ]; then
# build CImg with OpenMP support
make -C CImg CXX="$CXX" BITS=$BITS CONFIG=$CONFIG -j${MKJOBS} CXXFLAGS_ADD=-fopenmp LDFLAGS_ADD=-fopenmp || exit 1
elif [ -n "$GXX" ]; then
# GCC is available too!
# libSupport was compiled by clang, now clean it to build it again with gcc
make CXX="$CXX" BITS=$BITS CONFIG=$CONFIG -j${MKJOBS} HAVE_CIMG=0 clean || exit 1
# build CImg with OpenMP support, but statically link libgomp (see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=31400)
make -C CImg CXX="$GXX" BITS=$BITS CONFIG=$CONFIG -j${MKJOBS} CXXFLAGS_ADD=-fopenmp LDFLAGS_ADD="-fopenmp -static-libgcc" || exit 1
fi
cp -r CImg/$OS-$BITS-$CONFIG/CImg.ofx.bundle "$PLUGINDIR/OFX/Natron" || exit 1
cd ..
#Build openfx-arena
git clone $GIT_ARENA
cd openfx-arena || exit 1
git checkout "$ARENA_BRANCH" || exit 1
git submodule update -i --recursive || exit 1
if [ "$ARENA_BRANCH" = "master" ]; then
# the snapshots are always built with the latest version of submodules
if true; then
git submodule foreach git pull origin master
else
echo "Warning: openfx-arena submodules not updated..."
fi
fi
# ImageMagick on OSX is usually compiled without openmp
echo "Warning: removing -lgomp from MAGICK_LINKFLAGS, since ImageMagick on OSX is compiled without OMP support"
sed -e s/-lgomp// -i.orig Makefile.master
#Always bump git commit, it is only used to version-stamp binaries
ARENA_GIT_VERSION=`git log|head -1|awk '{print $2}'`
sed -i "" -e "s/ARENAPLUG_DEVEL_GIT=.*/ARENAPLUG_DEVEL_GIT=${ARENA_GIT_VERSION}/" $CWD/commits-hash.sh || exit 1
make CXX="$CXX" USE_PANGO=1 USE_SVG=1 STATIC=1 BITS=$BITS CONFIG=$CONFIG -j${MKJOBS} || exit 1
cp -r Bundle/$OS-$BITS-$CONFIG/Arena.ofx.bundle "$PLUGINDIR/OFX/Natron" || exit 1
cd ..
#Dump symbols for breakpad before stripping
if [ "$DISABLE_BREAKPAD" != "1" ]; then
for bin in IO Misc CImg Arena; do
binary="$PLUGINDIR"/${bin}.ofx.bundle/*/*/$bin.ofx
# DSYM_64=${bin}x86_64.dSYM
# DSYM_32=${bin}i386.dSYM
# dsymutil -arch x86_64 -o $DSYM_64 "$binary"
# dsymutil -arch i386 -o $DSYM_32 "$binary"
$DUMP_SYMS -a x86_64 "$PLUGINDIR"/${bin}.ofx.bundle/Contents/$OS-$BITS-$CONFIG/${bin}.ofx > "$CWD/build/symbols/${bin}.ofx-${TAG}-Mac-x86_64.sym"
$DUMP_SYMS -a i386 "$PLUGINDIR"/${bin}.ofx.bundle/Contents/$OS-$BITS-$CONFIG/${bin}.ofx > "$CWD/build/symbols/${bin}.ofx-${TAG}-Mac-i386.sym"
# rm -rf $DSYM_64
# rm -rf $DSYM_32
#Strip binary
if [ -x "$binary" ]; then
echo "* stripping $binary";
# Retain the original binary for QA and use with the util 'atos'
#mv -f "$binary" "${binary}_FULL";
if lipo "$binary" -verify_arch i386 x86_64; then
# Extract each arch into a "thin" binary for stripping
lipo "$binary" -thin x86_64 -output "${binary}_x86_64";
lipo "$binary" -thin i386 -output "${binary}_i386";
# Perform desired stripping on each thin binary.
strip -S -x -r "${binary}_i386";
strip -S -x -r "${binary}_x86_64";
# Make the new universal binary from our stripped thin pieces.
lipo -arch i386 "${binary}_i386" -arch x86_64 "${binary}_x86_64" -create -output "${binary}";
# We're now done with the temp thin binaries, so chuck them.
rm -f "${binary}_i386";
rm -f "${binary}_x86_64";
fi
#rm -f "${binary}_FULL";
fi
done
fi
# move all libraries to the same place, put symbolic links instead
for plugin in "$PLUGINDIR"/*.ofx.bundle; do
cd "$plugin/Contents/Libraries"
for lib in lib*.dylib; do
if [ -f "../../../../Frameworks/$lib" ]; then
rm "$lib"
else
mv "$lib" "../../../../Frameworks/$lib"
fi
ln -sf "../../../../Frameworks/$lib" "$lib"
done
if [ "$COMPILER" = "gcc" ]; then # use gcc's libraries everywhere
for l in gcc_s.1 gomp.1 stdc++.6; do
lib=lib${l}.dylib
for deplib in "$plugin"/Contents/MacOS/*.ofx "$plugin"/Contents/Libraries/lib*dylib ; do
install_name_tool -change /usr/lib/$lib @executable_path/../Frameworks/$lib $deplib
done
done
fi
done
#Build openfx-opencv
#git clone $GIT_OPENCV
#cd openfx-opencv || exit 1
#git checkout "$CV_BRANCH" || exit 1
#git submodule update -i --recursive || exit 1
#if [ "$CV_BRANCH" = "master" ]; then
# # the snapshots are always built with the latest version of submodules
# git submodule foreach git pull origin master
#fi
#Always bump git commit, it is only used to version-stamp binaries
#CV_GIT_VERSION=`git log|head -1|awk '{print $2}'`
#sed -i -e "s/CVPLUG_DEVEL_GIT=.*/CVPLUG_DEVEL_GIT=${CV_GIT_VERSION}/" $CWD/commits-hash.sh || exit 1
#cd opencv2fx || exit 1
#make CXX="$CXX" BITS=$BITS CONFIG=$CONFIG -j${MKJOBS} || exit 1
#cp -r */$OS-$BITS-*/*.ofx.bundle "$PLUGINDIR" || exit 1
#cd ..
| olear/Natron | tools/MacOSX/build-plugins.sh | Shell | gpl-2.0 | 8,262 |
#
# Copyright 2003 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
# @test
# @bug 4262583 4418997 4795136
# @summary Check support for jar file members with sizes > 2GB
# @author Martin Buchholz
#
# @build FileBuilder
# @run shell 3GBZipFiles.sh 9986
# @ignore runs for hours and eats up 7 Gigabytes of disk space
# @run shell/timeout=604800 3GBZipFiles.sh 3141592653
# Command-line usage:
# javac FileBuilder.java && sh 3GBZipFiles.sh /path/to/jdk filesize
# -------------------------------------------------------------------
# Testing strategy: We want to test for size limits on the Jar file
# itself, as well as on the compressed and uncompressed sizes of the
# files stored therein. All of these limits should be 4GB and should
# be tested in the 2GB-4GB range. We also want to check that it is
# possible to store more than 6GB of actual data in a zip file, if we
# have two files of size 3GB which compress nicely. We also want to
# test both the "STORED" and "DEFLATED" compression methods.
# -------------------------------------------------------------------
die () { echo "$1" >&2; exit 1; }
sys () { "$@" || die "Command $@ failed: rc=$?"; }
set -u
myName=`printf %s "$0" | sed 's:.*[/\\]::'`;
if test -z "${TESTJAVA-}"; then
test "$#" -eq 2 || die "Usage: $myName /path/to/jdk filesize"
TESTJAVA="$1"; shift
TESTCLASSES="`pwd`"
fi
hugeSize="$1"; shift
tinySize=42
JAVA="$TESTJAVA/bin/java"
JAR="$TESTJAVA/bin/jar"
currentDir="`pwd`"
tmpDir="$myName.tmp"
cleanup () { cd "$currentDir" && rm -rf "$tmpDir"; }
trap cleanup 0 1 2 15
sys rm -rf "$tmpDir"
sys mkdir "$tmpDir"
cd "$tmpDir"
buildFile ()
{
filetype_="$1"
filename_="$2"
case "$filename_" in
huge-*) filesize_="$hugeSize" ;;
tiny-*) filesize_="$tinySize" ;;
esac
sys "$JAVA" "-cp" "$TESTCLASSES" "FileBuilder" \
"$filetype_" "$filename_" "$filesize_"
}
testJarFile ()
{
echo "-------------------------------------------------------"
echo "Testing $1 $2"
echo "-------------------------------------------------------"
filetype="$1"
if test "$2" = "STORED"; then jarOpt="0"; else jarOpt=""; fi
filelist="$3"
jarFile="$myName.jar"
for file in $filelist; do
buildFile "$filetype" "$file"
done
sys "$JAR" cvM${jarOpt}f "$jarFile" $filelist
sys ls -l "$jarFile"
sys "$JAR" tvf "$jarFile"
for file in $filelist; do
case "$file" in
huge-*) size="$hugeSize" ;;
tiny-*) size="$tinySize" ;;
esac
case "`$JAR tvf $jarFile $file`" in
*"$size"*"$file"*) : ;;
*) die "Output of \"jar tvf\" is incorrect." ;;
esac
# Try to minimize disk space used while verifying the jar file.
sum1="`sum $file`"
sys rm "$file"
sys "$JAR" xvf "$jarFile" "$file"
sum2="`sum $file`"
test "$sum1" = "$sum2" || die "Jar File is corrupted."
sys rm "$file"
# unzip $jarFile $file
# sys rm "$file"
done
sys rm "$jarFile"
}
testJarFile "MostlyEmpty" "DEFLATED" "tiny-1 huge-1 tiny-2 huge-2 tiny-3"
testJarFile "MostlyEmpty" "STORED" "tiny-1 huge-1 tiny-2"
testJarFile "SlightlyCompressible" "DEFLATED" "tiny-1 huge-1 tiny-2"
cleanup
exit 0
| TheTypoMaster/Scaper | openjdk/jdk/test/java/util/zip/3GBZipFiles.sh | Shell | gpl-2.0 | 4,096 |
g++ -std=c++11 -I.. -o test_opencl_gini test_opencl_gini.cpp -lctemplate
| albertvucinovic/memrest | test/test_opencl_gini.sh | Shell | gpl-2.0 | 73 |
#!/bin/bash
#Crosslink
#Copyright (C) 2016 NIAB EMR
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License along
#with this program; if not, write to the Free Software Foundation, Inc.,
#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#contact:
#[email protected]
#Robert Vickerstaff
#NIAB EMR
#New Road
#East Malling
#WEST MALLING
#ME19 6BJ
#United Kingdom
#
# test crosslink on simulated data
# note: grid_run is a simple wrapper on top of SunGridEngine's qsub
#
set -eu
export PATH=${PATH}:/home/vicker/git_repos/crosslink/compare_progs
OUTDIR=/home/vicker/crosslink/ploscompbiol_data/erate_simdata
SCRIPTDIR=${CROSSLINK_PATH}/compare_progs
cd ${OUTDIR}
MAXJOBS=12
GIGS=3
rm -f joblist
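# Each grid_run call queues one mapping job per program per simulated sample;
# -L and -M presumably cap concurrency (MAXJOBS) and per-job memory (GIGS GB),
# and the job list captured below feeds the (commented-out) grid_wait at the end.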
eratelist='0.001 0.005 0.01 0.03 0.06'
for erate in ${eratelist}
do
for SAMPLE_DIR in sample_data/${erate}_*
do
export SAMPLE_DIR
echo ${SAMPLE_DIR}
SAMPLEBASE=$(basename ${SAMPLE_DIR})
grid_run -L${MAXJOBS} -M${GIGS} -Jmstmap_${SAMPLEBASE} "${SCRIPTDIR}/run_mstmap.sh" >> joblist
grid_run -L${MAXJOBS} -M${GIGS} -Jlepmap_${SAMPLEBASE} "${SCRIPTDIR}/run_lepmap.sh" >> joblist
grid_run -L${MAXJOBS} -M${GIGS} -Jtmap_${SAMPLEBASE} "${SCRIPTDIR}/run_tmap.sh" >> joblist
grid_run -L${MAXJOBS} -M${GIGS} -Jom_ug_${SAMPLEBASE} "${SCRIPTDIR}/run_onemap.sh om_ug" >> joblist
grid_run -L${MAXJOBS} -M${GIGS} -Jcl_approx_${SAMPLEBASE} "${SCRIPTDIR}/run_crosslink.sh cl_approx" >> joblist
grid_run -L${MAXJOBS} -M${GIGS} -Jcl_full_${SAMPLEBASE} "${SCRIPTDIR}/run_crosslink.sh cl_full" >> joblist
done
done
#wait for programs to finish running
#grid_wait -Ljoblist
| eastmallingresearch/crosslink | compare_progs/compare_progs_erate.sh | Shell | gpl-2.0 | 2,181 |
#! /bin/sh
../turtle/turtle -p../crawl $1
gcc -I.. -I../crawl -S -O2 `basename $1 .t`.c
| timfel/turtle | tools/make-assembler.sh | Shell | gpl-2.0 | 89 |
#!/bin/sh
# Run this to generate all the initial makefiles, etc.
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
PKG_NAME="GNOME Phone Manager"
(test -f $srcdir/configure.in) || {
echo -n "**Error**: Directory "\`$srcdir\'" does not look like the"
echo " top-level directory"
exit 1
}
which gnome-autogen.sh || {
echo "You need to install gnome-common from the GNOME CVS"
exit 1
}
REQUIRED_AUTOMAKE_VERSION=1.8 USE_GNOME2_MACROS=1 . gnome-autogen.sh
| GNOME/phonemgr | autogen.sh | Shell | gpl-2.0 | 478 |
#!/usr/bin/env bash
## author:: sadican
## description:: installation script for Floodlight controller
## https://github.com/floodlight/floodlight
## linux distro:: Ubuntu 14.04.1 LTS x64
# fail on error
set -e
# fail on unset var usage
set -o nounset
echo -e "\nINSTALLING REQUIRED PACKAGES\n"
sudo apt-get install -y git build-essential default-jdk ant python-dev
echo -e "\nCLONING FLOODLIGHT FROM GITHUB\n"
git clone git://github.com/floodlight/floodlight.git
echo -e "\nENTERING FLOODLIGHT DIRECTORY\n"
cd floodlight
echo -e "\nCOMPILING\n"
ant
echo -e "\nPREPARING ECLIPSE PROJECT\n"
ant eclipse
exit
| sadican/myScripts | floodlight.sh | Shell | gpl-2.0 | 615 |
#!/bin/bash
# Push changes from laptop to [email protected]/p/owsx/git
# http://www.siteground.com/tutorials/git/commands.htm
git push origin master
| devnull5475/SI_ORAWSV_POC | bin/push_gh.sh | Shell | gpl-2.0 | 156 |
#!/bin/bash
# set to path where scripts are, usually /home/$USER/bin/otrAutoDownload
otrAutoDownloadPath="/home/florin/bin/otrAutoDownload"
source $otrAutoDownloadPath/otr.conf
# grep new links
$otrAutoDownloadPath/otrGrepLinks.sh $otrAutoDownloadPath
if [ -f "$otrAutoDownloadPath/happyHour.lock" ]
then
echo "Downloads are already running."
else
$otrAutoDownloadPath/otrHappyHour.sh $otrAutoDownloadPath
fi
# decode new files
$otrAutoDownloadPath/otrDecodeAllFiles.sh $otrAutoDownloadPath
| wasmitnetzen/otrAutoDownload | cron.sh | Shell | gpl-2.0 | 499 |
#!/bin/bash
echo ""
echo ""
# Copy VideoModes
cp ~/rp-video-manager/all-videomodes/videomodes-updated2.cfg /opt/retropie/configs/all/videomodes.cfg
# SNES
cp ~/rp-video-manager/users/thorig/system-retroarch-cfg/snes/retroarch-overlay-thorig-1080p-snes.cfg /opt/retropie/configs/snes/retroarch.cfg
# Sega Mastersystem
cp ~/rp-video-manager/users/thorig/system-retroarch-cfg/mastersystem/retroarch-overlay-thorig-1080p-mastersystem.cfg /opt/retropie/configs/mastersystem/retroarch.cfg
# Sega Megadrive
cp ~/rp-video-manager/users/thorig/system-retroarch-cfg/megadrive/retroarch-overlay-thorig-1080p-megadrive.cfg /opt/retropie/configs/megadrive/retroarch.cfg
# Neo Geo
cp ~/rp-video-manager/users/thorig/system-retroarch-cfg/neogeo/retroarch-overlay-thorig-1080p-neogeo.cfg /opt/retropie/configs/neogeo/retroarch.cfg
# NES
cp ~/rp-video-manager/users/thorig/system-retroarch-cfg/nes/retroarch-overlay-thorig-1080p-nes.cfg /opt/retropie/configs/nes/retroarch.cfg
# PSX
cp ~/rp-video-manager/users/thorig/system-retroarch-cfg/psx/retroarch-overlay-thorig-1080p-psx.cfg /opt/retropie/configs/psx/retroarch.cfg
# PC Engine
cp ~/rp-video-manager/users/thorig/system-retroarch-cfg/pcengine/retroarch-overlay-thorig-1080p-pcengine.cfg /opt/retropie/configs/pcengine/retroarch.cfg
echo "Overlays have been applied for SNES, Megadrive, Mastersysten, Neo Geo, NES, PC Engine and PSX"
echo "There are no shaders applied with these settings"
echo "These settings are from user thorig: http://blog.petrockblock.com/community/members/thorig/"
echo ""
| alfo456/display-switch | 1080.sh | Shell | gpl-3.0 | 1,687 |
ant deploy && ./script/run-server.sh
| willurd/gps-bukkit-plugin | script/build-and-run.sh | Shell | gpl-3.0 | 37 |
#!/bin/bash -eux
set -o pipefail
retry.py add-apt-repository 'ppa:fkrull/deadsnakes'
retry.py apt-get update -qq
retry.py apt-get install -qq \
shellcheck \
    python2.4
retry.py pip install tox --disable-pip-version-check
echo '{"verified": false, "results": []}' > test/results/bot/ansible-test-failure.json
ansible-test compile --failure-ok --color -v --junit --requirements
ansible-test sanity --failure-ok --color -v --junit --tox --skip-test ansible-doc --python 3.5
ansible-test sanity --failure-ok --color -v --junit --tox --test ansible-doc --coverage
rm test/results/bot/ansible-test-failure.json
if find test/results/bot/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
echo "One or more of the above ansible-test commands recorded at least one test failure."
exit 1
fi
| Inspq/ansible | test/utils/shippable/other.sh | Shell | gpl-3.0 | 822 |
#!/bin/bash
# By default, we're running on port 8003 and
# in debug mode which means in the foreground
# plackup is installed as part of the Plack module
plackup -p 8003 bin/app.pl
| rpgwnn/framework | start.sh | Shell | gpl-3.0 | 185 |
#!/bin/sh
echo -n 'Preparing files...'
cd ..
rm -f tetzle.desktop.in
cp tetzle.desktop tetzle.desktop.in
sed -e '/^Name\[/ d' \
-e '/^GenericName\[/ d' \
-e '/^Comment\[/ d' \
-e '/^Icon/ d' \
-e '/^Keywords/ d' \
-i tetzle.desktop.in
rm -f tetzle.appdata.xml.in
cp tetzle.appdata.xml tetzle.appdata.xml.in
sed -e '/p xml:lang/ d' \
-e '/summary xml:lang/ d' \
-e '/name xml:lang/ d' \
-e '/<developer_name>/ d' \
-i tetzle.appdata.xml.in
cd po
echo ' DONE'
echo -n 'Extracting messages...'
xgettext --from-code=UTF-8 --output=description.pot \
--package-name='Tetzle' --copyright-holder='Graeme Gott' \
../*.in
sed 's/CHARSET/UTF-8/' -i description.pot
echo ' DONE'
echo -n 'Cleaning up...'
cd ..
rm -f tetzle.desktop.in
rm -f tetzle.appdata.xml.in
echo ' DONE'
| gottcode/tetzle | data/unix/po/update-pot.sh | Shell | gpl-3.0 | 785 |
#!/bin/bash
# This script was written by Alessandro Accardo
# You can redistribute or modify or whatever you want to do with this piece of code, under the terms of the GNU GPL v3
# Copyright © Alessandro Accardo.
function getvalue() { # Much less complicated to read
egrep -i ^$2=\".*\" $1 | cut -d'"' -f2
}
function do-keyvalue-action() {
local action="$1"
local onlykey="$2"
local key="$3"
local value="$4"
local prettyfmt="$5"
case "$onlykey" in
"0")
if [ "$prettyfmt" == "1" ]; then local eq=" = "; else local eq="="; fi
$action "${key}${eq}${value}"
;;
"1")
$action "$key"
;;
"-1")
$action "$value"
;;
esac
}
function get-url-part() {
local part="$1"
local url="$2"
case "$part" in
proto)
echo $url | cut -d: -f1
;;
username)
echo $url | cut -d/ -f3 | cut -d@ -f1 -s | cut -d: -f1 -s
;;
password)
echo $url | cut -d/ -f3 | cut -d@ -f1 -s | cut -d: -f2 -s
;;
host)
echo $url | cut -d/ -f3 | cut -d@ -f2 | cut -d: -f1
;;
hostname)
echo $url | cut -d/ -f3 | cut -d@ -f2 | cut -d: -f1 | cut -d. -f1
;;
domain)
echo $url | cut -d/ -f3 | cut -d@ -f2 | cut -d: -f1 | rev | cut -d. -f1,2 | rev
;;
tld)
echo $url | cut -d/ -f3 | cut -d@ -f2 | cut -d: -f1 | rev | cut -d. -f1 | rev
;;
port)
echo $url | cut -d/ -f3 | cut -d@ -f2 | cut -d: -f2 -s
;;
path)
echo $url | cut -d/ -f4- -s | cut -d? -f1
;;
query)
echo $url | cut -d? -f2- -s | cut -d# -f1
;;
comment)
echo $url | cut -d# -f2 -s
;;
esac
}
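# Examples (illustrative URL):
#   get-url-part host "http://user:[email protected]:8080/a/b?q=1#frag" -> example.com
#   get-url-part path "http://user:[email protected]:8080/a/b?q=1#frag" -> a/b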
function getsysproxyvar() {
getvalue $PROXY_CONF $1
}
function toggleproxy() {
[[ -z "$1" ]] && echo "Error! action argument empty!" && return 1
local action="$1"
local protocols=(`getsysproxyvar PROTOCOLS`)
local modules=(`getsysproxyvar MODULES`)
for mod in "${modules[@]}"
do
local mod_src=`dirname $PROXY_CONF`/modules/$mod.pxm
echo Loading $mod_src...
source $mod_src
done
unset mod
for proto in "${protocols[@]}"
do
# Just to be sure that protocols come lowercase
local proto="$(echo $proto | tr '[:upper:]' '[:lower:]')"
# For each protocol I have to manage multiple modules
for mod in "${modules[@]}"
do
#echo "Module \"$mod\" is ${action}ing ${proto} proxy..."
do-proxy-conf-$mod-$action $proto
done
unset mod
done
unset mod
return 0
}
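# Each module (modules/<name>.pxm, sourced above) is expected to define
# hooks named do-proxy-conf-<module>-<enable|disable|check> that take the
# protocol name as their first argument.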
function checkproxy() {
toggleproxy "check"
}
function unsetproxy() {
toggleproxy "disable"
echo "Proxy disabled!"
}
function setproxy() {
toggleproxy "enable"
echo "Proxy enabled!"
}
function autoproxy() {
[ -n "$DISABLE_AUTOPROXY" ] && return 0
local enabled="`getsysproxyvar ENABLED`"
case "$enabled" in
1|[yY]|[yY][eE][sS])
setproxy
;;
0|[nN]|[nN][oO])
unsetproxy
;;
*)
echo "Warning: nothing done. Variable 'ENABLED' not set or bad value."
;;
esac
}
autoproxy
| kLeZ/linux-proxy-layer | proxy.sh | Shell | gpl-3.0 | 2,970 |
#!/bin/bash
# modules/gnuplot/run.sh
# Copyright Vince Mulhollon 2014
# GPLv3 license
pkg install --no-repo-update --yes gnuplot | $NIHLOG
exit 0
| vincemulhollon/nihconfig | modules/gnuplot/run.sh | Shell | gpl-3.0 | 149 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=Cygwin_4.x-Windows
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=dll
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/cs_1581_nim
OUTPUT_BASENAME=cs_1581_nim
PACKAGE_TOP_DIR=cs1581nim/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/cs1581nim/bin"
copyFileToTmpDir "${OUTPUT_PATH}.exe" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}.exe" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/cs1581nim.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/cs1581nim.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
| MichaelCoughlinAN/Odds-N-Ends | C++/Nim/CS 1581 Nim/nbproject/Package-Debug.bash | Shell | gpl-3.0 | 1,473 |
!Third Stage Configuration: Configure the Layer-2 2960 Switch - S3-SENAI
!Entering privileged EXEC mode
enable
!Entering global configuration mode
configure terminal
!Setting VTP client mode
vtp mode client
!Setting the VTP domain
vtp domain cisco.br
!Setting the VTP password
vtp password ccna
!Selecting the trunk interfaces
interface range gigabitEthernet 0/1 - 2
!Setting the interfaces to trunk mode
switchport mode trunk
!Leaving interface configuration
exit
!Selecting the access interfaces for VLAN 30
interface range fastEthernet 0/1 - 2
!Setting access mode on the interfaces
switchport mode access
!Assigning VLAN 30 to the interfaces
switchport access vlan 30
!Leaving configuration mode
end
!Saving the configuration
copy running-config startup-config
| vaamonde/netacad | modulo-02/capitulo-05/05-Etapa-03.sh | Shell | gpl-3.0 | 905 |
#!/bin/bash
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Upgrade VM to a new release if required
#
export DEBIAN_FRONTEND="noninteractive"
set -e
cd /tmp
rm -Rf gns3-vm-*
echo "Download https://github.com/GNS3/gns3-vm/archive/${BRANCH}.tar.gz"
curl -Lk "https://github.com/GNS3/gns3-vm/archive/${BRANCH}.tar.gz" > gns3vm.tar.gz
tar -xzf gns3vm.tar.gz
rm gns3vm.tar.gz
cd gns3-vm-${BRANCH}/config
sudo -E bash -x install.sh
sudo dpkg --configure -a
sudo apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade -y
curl -Lk "https://raw.githubusercontent.com/GNS3/gns3-vm/$BRANCH/scripts/welcome.py" > /tmp/gns3welcome.py
sudo mv "/tmp/gns3welcome.py" "/usr/local/bin/gns3welcome.py"
sudo chmod 755 "/usr/local/bin/gns3welcome.py"
set +e
# The upgrade from 0.8 to 0.8.6 is safe
if [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.8' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.8.1' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.8.2' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.8.3' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.8.4' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.8.5' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.0' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.1' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.2' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.3' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.4' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.5' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.6' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.7' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.8' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.9' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.9.10' ]
then
sudo apt-get -y dist-upgrade
sudo usermod -a -G vde2-net gns3
echo -n '0.10.0' > /home/gns3/.config/GNS3/gns3vm_version
fi
if [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.0' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.1' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.2' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.3' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.4' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.5' ]
then
sudo rm -f /usr/local/bin/vpcs
sed '/port = 8000$/d' -i /home/gns3/.config/GNS3/gns3_server.conf
echo -n '0.10.6' > /home/gns3/.config/GNS3/gns3vm_version
fi
if [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.6' ]
then
# It seems an upgrade of Docker can leave stale files behind
sudo rm -rf /var/lib/docker/aufs
echo -n '0.10.7' > /home/gns3/.config/GNS3/gns3vm_version
fi
if [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.7' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.8' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.9' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.10' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.11' ]
then
echo -n '0.10.12' > /home/gns3/.config/GNS3/gns3vm_version
fi
if [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.12' ] \
|| [ `cat /home/gns3/.config/GNS3/gns3vm_version` = '0.10.13' ]
then
sudo apt-get remove -y docker docker-engine
sudo rm /etc/apt/sources.list.d/*
curl -Lk "https://download.docker.com/linux/ubuntu/dists/trusty/pool/stable/amd64/docker-ce_17.03.1~ce-0~ubuntu-trusty_amd64.deb" > /tmp/docker.deb
sudo apt-get install -y libltdl7 libsystemd-journal0
sudo dpkg -i /tmp/docker.deb
echo -n '0.10.14' > /home/gns3/.config/GNS3/gns3vm_version
fi
| GNS3/gns3-vm | scripts/upgrade.sh | Shell | gpl-3.0 | 4,518 |
#!/bin/bash
# command to generate diff for upload to review board
# the diff is made between the merge-base revision (the commit where the branch
# was created) and the head of the branch.
# the git diff command and other details are printed to be copy/paste into
# description of review request.
#
function usage() {
echo "usage: ${0##*/} topics/myname/thebug /some/ouput/path/diff.out"
echo
exit 1
} # end usage()
#
# print banner with stars
#
function msg() {
local cnt=${#1};local sline='';local c
# create line of stars 4 longer than length of first argument to msg
for ((c=1;c<=(cnt+4);c++)); { sline=${sline}'*'; }
echo -e "\n\t${sline}\n\t* ${@}\n\t${sline}\n"
} # end msg()
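# Example (assumed formatting): msg "hello" prints a tab-indented banner:
#   *********
#   * hello
#   *********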
# parameter checks
if (( $# < 2 )) || [[ $1 = -[h?] ]]
then
usage
fi
DIFFOPTS='--full-index --patience -B -U20'
TRUNK='nas/trunk'
BRANCH=${1}
OUTPUT=${2}
# sanity check that git is OK
if ! git pull 2>/dev/null
then
msg "Not in a git repo: ${PWD}\n" "\t* git pull"
usage
fi
if ! git checkout ${BRANCH} 2>/dev/null
then
msg "Could not checkout branch: ${BRANCH}\n" "\t* git checkout ${BRANCH}"
usage
fi
BRANCHg=$(git branch |grep ^\*| awk '{print $2}')
BASE=$(git merge-base ${BRANCHg} ${TRUNK})
HEAD=$(git rev-parse ${BRANCHg})
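# BASE is the common ancestor of the branch and trunk, so BASE..HEAD covers
# only the commits made on the branch itself.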
echo -e "\n"
set -x
git diff ${DIFFOPTS} ${BASE}..${HEAD} > ${OUTPUT}
set +x
msg "TRUNK: ${TRUNK} \n"\
"\t* BRANCH: ${BRANCHg}\n"\
"\t* BASE: ${BASE}\n"\
"\t* HEAD: ${HEAD}\n"\
"\t* OUTPUT: ${OUTPUT}"
| mikersgit/nixScripts | bash/reviewDiff.sh | Shell | gpl-3.0 | 1,524 |
#! /bin/bash
# Tests whether a specified file exists or not; illustrates if/then/else.
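# Usage: ./filetest.sh <filename>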
file=$1
if [ -e "$file" ]
then
echo -e "File $file exists"
else
echo -e "File $file doesn't exists"
fi
exit 0
| UoM-ResPlat-DevOps/HPCshells | Resources/filetest.sh | Shell | gpl-3.0 | 205 |
#!/bin/bash
echo "/run/beamOn 100000" > tmp.mac
for front in 0 3
do
for mirror in 0 1
do
for ene in 0.5 1 2 3 4 5
do
./E6ES -c 1 -e $ene -p $mirror -d $front -m tmp.mac
hadd -f ene"$ene"mirror"$mirror"det"$front"col1.root result_t*
./E6ES -c 1 -w -e $ene -p $mirror -d $front -m tmp.mac
hadd -f wide1ene"$ene"mirror"$mirror"det"$front"col1.root result_t*
done
done
done
| aogaki/E6ElectronSpectrometer | Scripts/converter.sh | Shell | gpl-3.0 | 408 |
#!/bin/bash
## ------------------------------------------------------------------------
## classicalMIPS, Roberto Hexsel, 23nov2012-13nov2015
## ------------------------------------------------------------------------
# set -x
if [ ! -v tree ] ; then
# you must set the location of the cMIPS root directory in the variable tree
# tree=${HOME}/cMIPS
# tree=${HOME}/cmips/cMIPS
export tree="$(echo $PWD | sed -e 's:\(/.*/xinu-cMIPS\)/.*:\1:')"
fi
bin=${tree}/bin
include=${tree}/include
srcVHDL=${tree}/vhdl
simulator="${tree}"/tb_cmips
visual="${tree}"/cMIPS.vcd
unset WAVE
length=1
unit=m
gtkwconf=pipe
synth=
recompile=false
touch input.data input.txt serial.inp
usage() {
cat << EOF
usage: $0 [options]
re-create simulator/model and run simulation
prog.bin and data.bin must be in the current directory
OPTIONS:
-h Show this message
-C re-create simulator by re-compiling all VHDL code
-t T number of time-units to run (default ${length})
-u U unit of time scale {m,u,n,p} (default ${unit}s)
  -n send simulator output to /dev/null, else to v_cMIPS.vcd
-w invoke GTKWAVE -- stdin will not read input from keyboard
-v F gtkwave configuration file (e.g. pipe.sav, default v.sav)
-syn run simulation with synthesis RAM/ROM addresses
EOF
}
while true ; do
case "$1" in
-h | "-?") usage ; exit 1
;;
-C) recompile=true
;;
-t) length=$2
shift
;;
-u) unit=$2
shift
;;
-n) visual=/dev/null
;;
-w) WAVE=true
;;
-syn | -mif ) synth="-syn"
;;
-v) gtkwconf=$2
shift
;;
-x) set -x
;;
"") break
;;
*) usage ; echo " invalid option: $1"; exit 1
;;
esac
shift
done
gfile=${gtkwconf%%.sav}
sav="${tree}"/${gfile}.sav
if [ $recompile = true ] ; then
"${bin}"/build.sh $synth || exit 1
fi
options="--ieee-asserts=disable --stop-time=${length}${unit}s --vcd=${visual}"
if [ -z "$WAVE" ] ; then
## simulator must be exec'd so it can read from the standard input
exec "${simulator}" $options --vcd-nodate
else
"${simulator}" $options ; gtkwave -O /dev/null -f ${visual} -a ${sav} &
fi
## --wave=${visual%.vcd}.ghw
| rhexsel/xinu-cMIPS | bin/run.sh | Shell | gpl-3.0 | 2,277 |
mlocarna example3.fa --alifold-consensus-dp --struct-weight=200 --use-ribosum=true --max-diff-am=30 --match=50 --max-diff=60 --min-prob=5.0E-4 --tree-min-prob=0.005 --rnafold-temperature=37.0 --noLP --plfold-span=150 --indel=-350 --indel-opening=-500 --sequ-local=false --mismatch=0 --write-structure
| s-will/LocARNA | Data/Examples/Web/call3.sh | Shell | gpl-3.0 | 302 |
#!/bin/sh
sv_stop() {
for daemon in /etc/daemons/*; do
/sbin/sv stop "$daemon"
done
}
trap "sv_stop; exit" SIGTERM
runsvdir /etc/daemons
| erlend/docker-puppetmaster | usr/sbin/runsvinit.sh | Shell | gpl-3.0 | 151 |
#!/bin/bash
cd src/kernel
./compileKernel.sh
cd ../../
as src/boot/boot.S -o obj/boot.o
ld obj/boot.o --oformat=binary -Ttext 0x7c00 -o bin/boot.bin
toolchain/bin/makeImage
| krzyswit2/randomOS | compile.sh | Shell | gpl-3.0 | 176 |
#! /bin/sh
#
# This is a collection of functions used to carry out common tasks
# in Schroedinger helper scripts. It won't run as a stand-alone script.
#
if test -e "$HOME/.bashrc"; then
source "$HOME/.bashrc"
fi
set_mpi_and_compiler_flavor () {
flavor="$1"; shift
if test -d .bzr -a -e .bzr/branch/last-revision; then
current_rev=$( cat .bzr/branch/last-revision | (read revno rest; echo r$revno) );
elif test -d .svn -a -e .svn/entries; then
current_rev=r$( head -n 4 .svn/entries | tail -n 1 );
elif bzr revno >/dev/null 2>&1; then
current_rev=r$( bzr revno );
elif svn info >/dev/null 2>&1; then
current_rev=r$( env LC_ALL=C svn info | grep ^Revision: | cut -d' ' -f2 )
else
current_rev=UNKNOWN
fi
rev="$(echo $flavor | cut -d/ -f1)"
compiler_and_mpilib="$(echo $flavor | cut -d/ -f2)"
if [ -z "$compiler_and_mpilib" -o "x$compiler_and_mpilib" = "x$rev" ]; then
compiler_and_mpilib="$rev"
# rev can be omitted - try to get it from the BZR/SVN repository
rev="$current_rev"
fi
compiler="$(echo $compiler_and_mpilib | (IFS=- read one two; echo $one) )"
if [ -z "$compiler" ]; then
compiler=gcc443
fi
mpi="$(echo $compiler_and_mpilib | (IFS=- read one two; echo $two) )"
if [ -z "$mpi" ]; then
mpi=ompi
fi
# rebuild flavor as a correct filename
flavor="${rev}-${compiler}-${mpi}"
## load modules
source /panfs/panfs0.ften.es.hpcn.uzh.ch/share/software/Modules/3.2.8/init/sh
supported_compilers='gcc412 gcc434 gcc441 gcc443 gcc450 icc'
supported_mpilibs='openmpi parastation parastation-mt mvapich intel none'
# load MPI - must match what the binary was compiled with!
case "$mpi" in
ompi|openmpi) # systemwide OpenMPI
pe=openmpi2
case "$compiler" in
gcc450) module load mpi/openmpi/gcc-4.5.0 ;;
gcc*) module load mpi/openmpi/gcc ;;
icc|intel) module load mpi/openmpi/intel ;;
esac
;;
ompi*) # My own OpenMPI install in ${sw}/lib etc.
pe=openmpi2
;;
para*) # systemwide Parastation
pe=parastation
export PATH=/opt/parastation/mpi2/bin:/opt/parastation/bin:$PATH
export LD_LIBRARY_PATH=/opt/parastation/mpi2/lib:$LD_LIBRARY_PATH
export LD_RUN_PATH=$LD_LIBRARY_PATH
# apparently required for PSMPI to run,
# see ChriBo's email to hpcnlist on 2011-02-09
export PBS_NODEFILE=$TMPDIR/machines
;;
para*mt) # systemwide Parastation w/ threads support
pe=parastation
export PATH=/opt/parastation/mpi2-mt/bin:/opt/parastation/bin:$PATH
export LD_LIBRARY_PATH=/opt/parastation/mpi2-mt/lib:$LD_LIBRARY_PATH
export LD_RUN_PATH=$LD_LIBRARY_PATH
# apparently required for PSMPI to run,
# see ChriBo's email to hpcnlist on 2011-02-09
export PBS_NODEFILE=$TMPDIR/machines
;;
mvapich) # MVAPICH is not supported by modules, apparently
pe=mpich_rsh
case "$compiler" in
gcc412)
export PATH=/panfs/panfs0.ften.es.hpcn.uzh.ch/mpi-libs/gcc/mvapich2-1.4rc2/bin:$PATH
export LD_LIBRARY_PATH=/panfs/panfs0.ften.es.hpcn.uzh.ch/mpi-libs/gcc/mvapich2-1.4rc2/lib:$LD_LIBRARY_PATH
export LD_RUN_PATH=$LD_LIBRARY_PATH
;;
gcc*)
export PATH=/panfs/panfs0.ften.es.hpcn.uzh.ch/mpi-libs/gcc-4.4.1/mvapich2-1.4rc2/bin:$PATH
export LD_LIBRARY_PATH=/panfs/panfs0.ften.es.hpcn.uzh.ch/mpi-libs/gcc-4.4.1/mvapich2-1.4rc2/lib:$LD_LIBRARY_PATH
export LD_RUN_PATH=$LD_LIBRARY_PATH
;;
esac
;;
mvapich*) # My own MVAPICH2 install in ${sw}/lib etc.
pe=mpich_rsh
;;
impi|intel) # Intel MPI is not supported by modules, apparently
pe=openmpi2
export PATH=/panfs/panfs0.ften.es.hpcn.uzh.ch/mpi-libs/intel_mpi/3.2.1.009/bin64:$PATH
export LD_LIBRARY_PATH=/panfs/panfs0.ften.es.hpcn.uzh.ch/mpi-libs/intel_mpi/3.2.1.009/lib64:$LD_LIBRARY_PATH
export LD_RUN_PATH=$LD_LIBRARY_PATH
;;
none) # no MPI
while type -a mpicc >/dev/null 2>&1; do
p=$(dirname $(which mpicc) )
echo 1>&2 "MPI compiler 'mpicc' found in PATH at '$p'; removing since no MPI was requested ..."
export PATH=$(echo $PATH | sed -e "s|:${p}||;s|${p}:||;")
done
;;
*)
die 1 "Unknown MPI library '${mpi}' - please choose one of: $supported_mpilibs"
;;
esac
# load the compiler and libs
module load binutils/2.20.1
case "$compiler" in
gcc412)
module load gcc/4.1.2
export CC=gcc
export CXX=g++
cflags='-march=nocona'
cxxflags='-march=nocona'
std_cflags='-O3'
std_cxxflags='-O3'
toolset=gcc
;;
gcc434)
# GCC 4.3.4 is std on Schroedinger after the 2011-01-29 upgrade
export CC=gcc-4.3
export CXX=g++-4.3
cflags='-march=native'
cxxflags='-march=native'
std_cflags='-O3'
std_cxxflags='-O3'
toolset=gcc
;;
gcc441)
module load gcc/4.4.1
export CC=gcc-4.4.1
export CXX=g++-4.4.1
cflags='-march=nocona'
cxxflags='-march=nocona'
std_cflags='-O3'
std_cxxflags='-O3'
toolset=gcc
;;
gcc443)
module load gcc/4.4.3
export CC=/panfs/panfs0.ften.es.hpcn.uzh.ch/share/software/Compilers/gcc-4.4.3-build/bin/gcc
export CXX=/panfs/panfs0.ften.es.hpcn.uzh.ch/share/software/Compilers/gcc-4.4.3-build/bin/g++
# however, we need to tell gcc that it has to use the 4.4.3
# libstdc++, otherwise it will try to use the system default one
export LD_RUN_PATH=/panfs/panfs0.ften.es.hpcn.uzh.ch/share/software/Compilers/gcc-4.4.3-build/lib:$LD_RUN_PATH
export LD_LIBRARY_PATH=/panfs/panfs0.ften.es.hpcn.uzh.ch/share/software/Compilers/gcc-4.4.3-build/lib:$LD_LIBRARY_PATH
cflags='-march=native'
cxxflags='-march=native'
std_cflags='-O3'
std_cxxflags='-O3'
toolset=gcc
;;
gcc450)
# GCC 4.5 provided by SLES11 package after the 2011-01-29 upgrade
#module load gcc/4.5.0
export CC=gcc-4.5
export CXX=g++-4.5
# however, we need to tell gcc that it has to use the 4.5
# libstdc++, otherwise it will try to use the 4.3 one
export LD_RUN_PATH=/usr/lib64/gcc/x86_64-suse-linux/4.5:$LD_RUN_PATH
export LD_LIBRARY_PATH=/usr/lib64/gcc/x86_64-suse-linux/4.5:$LD_LIBRARY_PATH
cflags='-march=native'
cxxflags='-march=native'
std_cflags='-O3'
std_cxxflags='-O3'
toolset=gcc
;;
icc|intel)
module load intel/comp/11.1.064
export CC=icc
export CXX=icpc
# c(xx)flags are the equivalent of `-fast` w/out the `-static`
cflags='-xHOST -O3 -ipo -no-prec-div'
cxxflags='-xHOST -O3 -ipo -no-prec-div'
std_cflags='-O3'
std_cxxflags='-O3'
toolset=intel
;;
*)
die 1 "Unknown compiler flavor '${compiler}' - please choose one of: $supported_compilers"
;;
esac
# MPI wrappers should use the default compiler
export I_MPI_CC=`which $CC`
export I_MPI_CXX=`which $CXX`
export MPICH_CC=`which $CC`
export MPICH_CXX=`which $CXX`
export OMPI_CC=`which $CC`
export OMPI_CXX=`which $CXX`
# enable local sw
sw="$HOME/sw/${compiler}-${mpi}"
PATH=${sw}/bin:$PATH; export PATH
LD_LIBRARY_PATH=${sw}/lib:$LD_LIBRARY_PATH; export LD_LIBRARY_PATH
}
show_build_information () {
echo === running info ===
echo flavor: $flavor
echo revno: $rev
echo compiler: $compiler
echo mpi: $mpi
echo node: $(hostname)
echo === compiler information ===
which ${CXX}
set -e # exit on error here, in case the compiler does not have enough licences
${CXX} --version
set +e
if [ "x${mpi}" != 'xnone' ]; then
which mpicxx
mpicxx --version
echo
fi
which as
as --version
echo
}
prepare_openmpi_environment () {
require_environment_var PE_HOSTFILE
require_environment_var TMPDIR
cat $PE_HOSTFILE | \
(while read hostname nslots queue rest;
do echo "$hostname slots=$nslots"; done) > $TMPDIR/hostfile
cat $PE_HOSTFILE | \
(while read hostname nslots queue rest;
do echo "$hostname"; done) | uniq > $TMPDIR/hostfile.uniq
set -e
eval `ssh-agent -s`
ssh-add $HOME/.ssh/id_dsa
set +e
gmca="--gmca plm_rsh_agent $HOME/bin/qrsh.sh"
hostfile="$TMPDIR/hostfile"
hostfile_uniq="$TMPDIR/hostfile.uniq"
}
## generic functions
die () {
rc="$1"
shift
(echo -n "$PROG: ERROR: ";
if [ $# -gt 0 ]; then echo "$@"; else cat; fi) 1>&2
exit $rc
}
have_command () {
type "$1" >/dev/null 2>/dev/null
}
require_command () {
if ! have_command "$1"; then
die 1 "Could not find required command '$1' in system PATH. Aborting."
fi
}
require_environment_var () {
if [ -z "$(eval echo '$'$1)" ]; then
die 1 "Require environment variable '$1' is not defined or has empty value. Aborting."
fi
}
is_absolute_path () {
expr match "$1" '/' >/dev/null 2>/dev/null
}
_ () {
echo
echo ==== "$@" ...;
}
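## Hypothetical usage from a wrapper script (the flavor string is illustrative):
##   . functions.sh
##   set_mpi_and_compiler_flavor "r123/gcc443-ompi"
##   show_build_information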
| riccardomurri/rheinfall | util/schroedinger/functions.sh | Shell | gpl-3.0 | 10,051 |
echo "remove old data"
rm MeganServer-standalone.zip
rm -r MeganServer-standalone/
echo
echo
echo
echo
echo "Building MEGANServer"
echo
echo
gradle assemble
echo
echo
echo "Building finished"
echo
echo
echo
echo "create archive"
mkdir MeganServer-standalone
cp ../../manual/manual.pdf MeganServer-standalone/
cp -r bin MeganServer-standalone/
cp -r lib MeganServer-standalone/
cp -r log MeganServer-standalone/
cp build/libs/MeganServer.war MeganServer-standalone/lib/
rm MeganServer-standalone/log/*
cp -r properties MeganServer-standalone/
zip -r MeganServer-standalone.zip MeganServer-standalone
echo
echo
echo
echo "cleaning up"
gradle clean
rm -r MeganServer-standalone
echo
echo
echo
echo "DONE"
| danielhuson/megan6server | gradlebuild/standalone/buildCompressClean.sh | Shell | gpl-3.0 | 703 |
#!/bin/bash
#
subptcli=$1
# Get user variables
if [[ -f ./.user_input ]] ; then
echo ""
echo "Previous user input found."
echo ""
cat ./.user_input
echo ""
star=$(cat .user_input | grep lrStar | awk '{print $2}')
subptclno=$(cat .user_input | grep lrSubptclno | awk '{print $2}')
apix=$(cat .user_input | grep lrApix | awk '{print $2}')
box=$(cat .user_input | grep lrBox | awk '{print $2}')
length=$(cat .user_input | grep lrLength | awk '{print $2}')
newbox=$(cat .user_input | grep lrNewbox | awk '{print $2}')
project=$(cat .user_input | grep lrProject | awk '{print $2}')
ptcldir=$(cat .user_input | grep lrPtcldir | awk '{print $2}')
maskdir=$(cat .user_input | grep lrMaskdir | awk '{print $2}')
res=$(cat .user_input | grep lrResolution | awk '{print $2}')
ctf=$(cat .user_input | grep lrCtf | awk '{print $2}')
echo "Press Enter to continue or ctrl-c to quit and delete .user_input"
read p
else
echo "LocalRec parameters" > .user_input
echo "Data star file which points to your whole particle stacks. i.e. ./star/run_data.star"
read star
echo "lrStar: ${star}" >> .user_input
echo "The number of sub-particles you are extracting i.e. number of masks or cmm vectors"
read subptclno
echo "lrSubptclno: ${subptclno}" >> .user_input
echo "The pixel size of the data"
read apix
echo "lrApix: ${apix}" >> .user_input
echo "Original particle box size (px)"
read box
echo "lrBox: ${box}" >> .user_input
echo "Distance from centre of whole particle to subparticle in Angstroms i.e. average cmm marker length"
echo "Can set to auto"
read length
echo "lrLength: ${length}" >> .user_input
echo "The size of the box in which sub-particles will be extracted (px)"
read newbox
echo "lrNewbox: ${newbox}" >> .user_input
echo "The name that will be appended to all sub-particle extractions"
read project
echo "lrProject: ${project}" >> .user_input
echo "The directory name used for the extracted sub-particles"
read ptcldir
echo "lrPtcldir: ${ptcldir}" >> .user_input
echo "Mask location, leave empty for no partial singla subtraction"
read maskdir
echo "lrMaskdir: ${maskdir}" >> .user_input
echo "Original reconstruction resolution (for lowpass filtering subparticle volumes)"
read res
echo "lrResolution: ${res}" >> .user_input
echo "CTF correction behaviour for subparticle volumes, provide --ctf or blank"
read ctf
echo "lrCtf: ${ctf}" >> .user_input
fi
#start at subparticle number
if [ -z $1 ] ; then
i=1
else
i=$1
fi
#Inform that cmm_marker lengths will be pulled from log files
if [[ $length == auto ]] ; then
echo "Sub-particle length is set to auto..."
echo "Subparticle length parameter will be pulled from ./cmm_markers/logs/*log files"
if [[ -f ./cmm_markers/logs/marker_distance_stats.log ]] ; then
echo ""
echo "marker_distance_stats.log exists, proceeding"
echo ""
cat ./cmm_markers/logs/marker_distance_stats.log
echo ""
else
echo ""
echo "marker_distance_stats.log does not exist, are you sure you want to continue?"
read p
fi
else
echo "Sub-particle length is set manually to ${length}..."
fi
## Check for relion1.4
command -v relion >/dev/null 2>&1 || { echo >&2 "Relion does not appear to be installed or sourced..."; exit 1; }
exe=$(which relion | grep 1.4)
if [[ -z ${exe} ]]; then
echo 'Relion installation:'
which relion
echo ''
echo 'Path does not contain 1.4, are you sure this is Relion-1.4?'
echo 'Press Enter to continue or ctrl-c to quit and fix your Relion version'
read p
else
echo 'Relion installation:'
which relion
echo 'Found 1.4 in path, appears to be Relion-1.4'
echo 'Looks good, continuing...'
fi
#Important info
echo ""
echo "The sub-particle count is set to ${subptclno}"
echo ""
echo "Will start at sub-particle " $i
echo ""
echo 'Input star: ' $star
echo 'Subparticle no: ' $subptclno
echo 'apix: ' $apix
echo 'Input box size (px): ' $box
echo 'Vector length to subparticle (A):' $length
echo 'New box size (px): ' $newbox
echo 'Project name: ' $project
echo 'Subparticle directory name: ' $ptcldir
echo 'Masks for subtraction location: ' $maskdir
echo ""
echo "Relion version currently sourced:"
which relion
echo ""
read -p "Press [Enter] key to confirm and run script..."
echo ""
echo "Woud you like to overwrite any preexisting subparticle extractions (y/n)?"
read p
## Set up job monitoring
if [[ -f .localrec_progress ]] ; then
echo ".localrec_progress exists"
else
echo ".localrec_progress does not exist"
echo "Localrec subparticle extraction progress:" > .localrec_progress
fi
## Timestamp function
function timestamp() {
date +%F_%T
}
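# timestamp emits e.g. 2015-06-01_13:45:07 (date +%F_%T), used to tag .localrec_progress entries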
## Overwrite or not
if [ $p == "y" ] ; then
echo "Overwriting preexisting subparticles..."
sed -i "/${ptcldir}/d" .localrec_progress
echo ""
elif [ $p == "n" ] ; then
echo "No overwrite. Only doing unfinished subparticle extraction..."
echo ""
fi
if [ -z $p ] ; then
echo "Did not understand input, exiting..."
exit
fi
## Set up partilces for relion localized reconstruction
j=$(($subptclno+1))
if [[ $length == auto ]] ; then
autolength=1
fi
while [ $i -lt $j ] ; do
#Search .localrec_progress and assess whether subparticles have previously been extracted
localrec_progress=$(grep ${ptcldir} .localrec_progress | grep localrec_subparticles_${i})
#Get distance to subparticle from cmm_marker log file
if [[ $autolength == 1 ]] ; then
length=$(cat cmm_markers/logs/marker_${i}_distance.log | grep "minimum distance" | awk '{print $8}')
echo ""
echo "Distance is set to auto, reading distance from cmm log file..."
echo "Distance to subparticle, length is ${length} for marker_${i}"
echo ""
fi
if [[ -n $localrec_progress ]] ; then
echo $localrec_progress
echo "Skipping localrec subparticle extraction ${ptcldir} ${i}, already processed"
else
if [[ -z $maskdir ]] ; then
#Do subparticle extraction without signal subtraction
echo "Running subparticle extraction without signal subraction"
echo ""
echo "scipion run relion_localized_reconstruction.py --prepare_particles --create_subparticles --align_subparticles --extract_subparticles --sym C1 --cmm cmm_markers/marker_${i}.cmm --angpix ${apix} --particle_size ${box} --length ${length} --subparticle_size ${newbox} --output ${ptcldir}/localrec_${project}_${i} ${star}"
scipion run relion_localized_reconstruction.py --prepare_particles --create_subparticles --align_subparticles --extract_subparticles --sym C1 --cmm cmm_markers/marker_${i}.cmm --angpix ${apix} --particle_size ${box} --length ${length} --subparticle_size ${newbox} --output ${ptcldir}/localrec_${project}_${i} ${star}
else
echo "Running subparticle extraction with signal subraction"
echo ""
#Do subparticle extraction with signal subtraction
echo "scipion run relion_localized_reconstruction.py --prepare_particles --masked_map ${maskdir}/mask${i}_subtraction_soft.mrc --create_subparticles --align_subparticles --extract_subparticles --sym C1 --cmm cmm_markers/marker_${i}.cmm --angpix ${apix} --particle_size ${box} --length ${length} --subparticle_size ${newbox} --output ${ptcldir}/localrec_${project}_${i} ${star}"
scipion run relion_localized_reconstruction.py --prepare_particles --masked_map ${maskdir}/mask${i}_subtraction_soft.mrc --create_subparticles --align_subparticles --extract_subparticles --sym C1 --cmm cmm_markers/marker_${i}.cmm --angpix ${apix} --particle_size ${box} --length ${length} --subparticle_size ${newbox} --output ${ptcldir}/localrec_${project}_${i} ${star}
fi
echo "${ptcldir} localrec_subparticles_${i}: completed subparticle extraction: $(timestamp)" >> .localrec_progress
fi
i=$(($i+1))
done
## Make a copy of the script so that there is a record
scp -r $0 $ptcldir
echo ""
echo "Copy of this script made in subparticle directory: ${ptcldir}"
## Make a copy of the star file that was used so that there is a record
scp -r ${star} $ptcldir
echo ""
echo "Copy of the star file used for subparticle extraction made in: "
echo "${ptcldir}"
## Make a copy of the cmm_markers so that there is a record
scp -r cmm_markers $ptcldir
echo ""
echo "Copy of the cmm_markers used for subparticle extraction made in: "
echo "${ptcldir}"
## Suggest command for joining star files
echo ""
echo "Done!"
echo "You may want to join all the subparticle star files for further classification and refinement in Relion"
echo ""
echo ""
cd $ptcldir
starlist=$(echo *${project}*star)
echo "relion_star_handler --combine \" ${starlist} \" --o localrec_${project}_all.star" > localrec_join_subparticle_star.sh
cd ..
echo "With Relion-3.0 or greater sourced execute the localrec_join_subparticle_star.sh to create a combined subparticle star file..."
echo ""
echo "Definitely done.."
echo ""
| kylelmorris/localrec_control | localrec_create_subparticles.sh | Shell | gpl-3.0 | 8,974 |
#!/bin/bash
ab -n 1000 -c 100 http://lenove:5000/
| yetship/blog_codes | python/gevent/benchmark/bench.sh | Shell | gpl-3.0 | 51 |
#######################################################
# Install fqzcomp -- FASTQ #
# - - - - - - - - - - - - - - - - - - - - #
# Morteza Hosseini [email protected] #
# Diogo Pratas [email protected] #
# Armando J. Pinho [email protected] #
#######################################################
#!/bin/bash
rm -f fqzcomp-4.6.tar.gz
url="https://downloads.sourceforge.net/project/fqzcomp"
wget $WGET_OP $url/fqzcomp-4.6.tar.gz
tar -xzf fqzcomp-4.6.tar.gz
mv fqzcomp-4.6/ fqzcomp/ # Rename
mv fqzcomp/ $progs/
rm -f fqzcomp-4.6.tar.gz
cd $progs/fqzcomp/
make
cd ../.. | pratas/cryfa | script/ins_fqzcomp.sh | Shell | gpl-3.0 | 739 |
#!/bin/bash
if [ $(id -u) -ne 0 ]; then
echo >&2 "Must be run as root"
exit 1
fi
set -e
set -x
. config.env
pushd /home/$NORMAL_USER/GitHub/companion/Common/Ubuntu/vision_to_mavros
time ./install_vision_to_mavros.sh
popd
tput setaf 2
echo "Finished installing vision_to_mavros Pose Scripts"
tput sgr0
| diydrones/companion | RPI2/Ubuntu/7_setup_vision_to_mavros.sh | Shell | gpl-3.0 | 312 |
#!/bin/bash
while :
do
cp /media/fritzbox/faxbox/*.pdf /var/www/html/alarmdisplay-ffw/fritzbox/faxbox/
sleep 7
done
| windele/alarmdisplay-ffw | fritzbox/fritzbox_fax_synchron.sh | Shell | gpl-3.0 | 118 |
#!/usr/bin/env bash
scriptName="stampPDF"
installPath="/usr/bin/pdfForts/"
serviceMenus="/usr/share/kservices5/ServiceMenus/pdfForts/"
configFile="y"
templateFile="y"
templateName="stampPDF.odt"
| sjau/pdfForts | stampPDF/vars.sh | Shell | gpl-3.0 | 196 |
#!/bin/bash
cd fu-hive-shell/
git fetch --all
git reset --hard origin/master
git pull
echo "给脚本添加权限:"
echo `pwd`
find . -name "*.sh" -exec chmod 775 {} \;
| zhouchaochao/learning | doc/src/main/doc/git/强制下拉.sh | Shell | gpl-3.0 | 171 |
#!/bin/bash
gnorpm-auth -U $*
| kernt/linuxtools | gnome3-shell/nautilus-scripts/System/rpm/Install-Rpm.sh | Shell | gpl-3.0 | 30 |
#!/bin/bash
# Powered by Gustavo Moreno Baez
sub=`zenity --entry --title="Action Selection" --text="1.-Merge PDF
2.- Section Off Pages
3.- Extract Pages
4.- Insert watermark (watermark above-stamp-)
5.- Insert watermark (watermark below-background-)
6.- Protect PDF
7.- PDF Repair
8.- Convert to PDF (Any format supported by OpenOffice !!!!!!!)
9.- Remove passwords (run from a console to see: user (to open) / owner (to edit)) - requires pdfcrack (available in the Ubuntu repos); up to 3 characters cracks quickly, brute-forcing longer keys is slow"`
case $sub in
# Merge PDF
1)
clear
Selection=`zenity --title "Select the PDFs to merge, in the desired order" --file-selection --multiple|sed -e 's/|/\" \"/g'`
echo \"$Selection\"
echo "Has the following PDF Selected \"$Selection\""
Output=$(zenity --file-selection --title "Select the path and type the name of the PDF you want to create" --save --confirm-overwrite)
echo pdftk \"$Selection\" cat output \"$Output\">/home/$USER/Desk/temporal_mixing
chmod +x /home/$USER/Desk/temporal_mixing
/home/$USER/Desk/temporal_mixing
rm /home/$USER/Desk/temporal_mixing
;;
# Section Off Pages
2)
FILE=`zenity --file-selection --title="Select the PDF file whose pages you want to extract as separate files"`
case $? in
0)
echo "\"$FILE\" Selected.";;
1)
echo "No file selected.";;
-1)
echo "An unexpected error has occurred.";;
esac
output=$(zenity --file-selection --save --confirm-overwrite);
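# pdftk burst writes one numbered PDF per page, using the %02d pattern as the page counter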
pdftk "$FILE" burst output "$output"_%02d.pdf
;;
# Extract Pages
3)
clear
FILE=`zenity --file-selection --title="Select the PDF file from which you want to extract pages"`
case $? in
0)
echo "\"$FILE\" Selected.";;
1)
echo "No file selected.";;
-1)
echo "An unexpected error has occurred.";;
esac
FROM=`zenity --entry --title="Selection of first page" --text="Number of the first page you want to extract"`
UP=`zenity --entry --title="Selecting last page" --text="Number of the last page you want to extract"`
output=$(zenity --file-selection --save --confirm-overwrite);
pdftk A="$FILE" cat 'A'$FROM-$UP output "$output"
echo "pdftk A="$FILE" cat "$FILE"$FROM-$UP "$FILE2" output "$output""
;;
# Insert watermark (stamp)-Requires a PDF with transparency
4)
clear
FILE=`zenity --file-selection --title="Select the pdf file to insert the watermark"`
case $? in
0)
echo "\"$FILE\" Selected.";;
1)
echo "No file selected.";;
-1)
echo "No fie selected.";;
esac
zenity --info \
--text="When you accept, this opens a dialog box to select the file that will watermark. The file you select as watermark should be pdf with a transparent image as this will be placed on top of your document, not worth a jpg."
FILE2=`zenity --file-selection --title="Selection pdf file that will watermark"`
case $? in
0)
echo "\"$FILE2\" Selected.";;
1)
echo "No file selected.";;
-1)
echo "No file selected.";;
esac
output=$(zenity --file-selection --save --confirm-overwrite);echo $output
pdftk "$FILE" stamp "$FILE2" output "$output"
;;
# Insert watermark (background)
5)
clear
FILE=`zenity --file-selection --title="Select the pdf file to insert the watermark"`
case $? in
0)
echo "\"$FILE\" Selected.";;
1)
echo "No file selected.";;
-1)
echo "No file selected.";;
esac
zenity --info \
--text="When you accept, this opens a dialog box to select the file that will watermark. The file you select as watermark should be pdf, not worth a jpg. With this program you can move the image to PDF."
FILE2=`zenity --file-selection --title="Select the pdf file that will watermark"`
case $? in
0)
echo "\"$FILE2\" Selected.";;
1)
echo "No file selected.";;
-1)
echo "No file selected.";;
esac
output=$(zenity --file-selection --save --confirm-overwrite);echo $output
pdftk "$FILE" background "$FILE2" output "$output"
;;
# Protect PDF
6)
clear
FILE=`zenity --file-selection --title="Select the PDF file you want to protect"`
case $? in
0)
echo "\"$FILE\" Selected.";;
1)
echo "No file selected.";;
-1)
echo "An unexpected error has occurred.";;
esac
output=$(zenity --file-selection --save --confirm-overwrite);
USER=`zenity --entry --title="OWNER PASSWORD" --text="USER Enter a name without spaces (necessary to revoke / grant privileges in the future)"`
option=`zenity --entry --title="KEY TO OPENING" --text="Do you require a password to open? Write s (yes) or n (no)"`
if test $option = n
then
PASSWORD=no
else
PASSWORD=`zenity --entry --title="--- OPENING PASSWORD without spaces and different from USER ---" --text="Enter a password (required to open the document)"`
fi
zenity --info \
--text="options: Printing = Print It allows high quality printing is allowed ; DegradedPrinting = low quality; ModifyContents = Edit content, even reassembled, Assembly = is to extract / join pages; CopyContents = content may be copied and screen readers; ScreenReaders = It allows screen readers; ModifyAnnotations = allowed to modify the annotations including form filling, Fillin = filling is allowed AllFeatures = form of the above are allowed "
PERMITS=`zenity --entry --title="PERMITS" --text="Enter each option separated by spaces: Printing DegradedPrinting ModifyContents ScreenReaders ModifyAnnotations FillIn AllFeatures"`
if test $option = n
then
pdftk "$FILE" output "$output" owner_pw $USER allow $PERMITS
else
pdftk "$FILE" output "$output" owner_pw $USER user_pw $PASSWORD allow $PERMITS
fi
;;
# Repair PDF
7)
clear
FILE=`zenity --file-selection --title="Selection of corrupt PDF file"`
case $? in
0)
echo "\"$FILE\" Selected.";;
1)
echo "No file selected.";;
-1)
echo "An unexpected error has occurred.";;
esac
output=$(zenity --file-selection --save --confirm-overwrite);
pdftk "$FILE" output "$output"
;;
# Convert to PDF ::::: Convert any format supported by OpenOffice :::::: Requires PDF and OpenOffice Cups
8)
clear
FILE=`zenity --file-selection --title="Select the document you want to convert to PDF (.odt, .doc, .jpeg, etc. --- any format supported by OpenOffice)"`
case $? in
0)
echo "\"$FILE\" selected.";;
1)
echo "You have not selected any files.";;
-1)
echo "An unexpected error has occurred.";;
esac
zenity --info \
--text="Save the document is generated in \"$USER\"/Folder defined in CUPS-PDF (Vist http://www.guia-ubuntu.org/index.php?title=Instalar_impresora#Instalar_una_impresora_PDF or execute script http://www.atareao.es/ubuntu/conociendo-ubuntu/instalar-una-impresora-pdf-en-ubuntu-con-un-script)/ "
soffice -pt PDF "$FILE"
;;
# Recover the user key (to open) / owner key (for modification) of a pdf
9)
clear
FILE=`zenity --file-selection --title="Select the document for which you want to remove the key"`
case $? in
0)
echo "\"$FILE\" selected.";;
1)
echo "You have not selected any files.";;
-1)
echo "An unexpected error has occurred.";;
esac
zenity --info \
--text="To use this option, you must install the library pdfcrack (available in Ubuntu repos), if the key is-ej +3 digits takes 1 hour minimum 5 characters "
pdfcrack -o "$FILE"
esac
| kernt/linuxtools | gnome3-shell/nautilus-scripts/System/File Processing/Pdf-Tools/Nautilus-Pdf-Tools.sh | Shell | gpl-3.0 | 8,405 |
#!/usr/bin/env bash
# to-do: install via anaconda
# to-do: install basemap, cartopy
pips=(
-c\ scitools\ cartopy
jupyter
matplotlib
nltk
numpy
pandas
pillow
-c\ scitools\ pyshp
scipy
shapely
scikit-learn
)
for pip in "${pips[@]}"; do
conda install -y "$pip"
# pip2 install "$pip" -U
# pip3 install "$pip" -U
done
python -m nltk.downloader all
| GCDigitalFellows/drbdotfiles | etc/pip.sh | Shell | gpl-3.0 | 374 |
#!/bin/bash
echo "alias net0-pf-10 off" >> /etc/modprobe.conf
echo "options ipv6 disable=1" >> /etc/modprobe.conf
echo 256960 > /proc/sys/net/core/rmem_default
echo 256960 > /proc/sys/net/core/rmem_max
echo 256960 > /proc/sys/net/core/wmem_default
echo 256960 > /proc/sys/net/core/wmem_max
echo 0 > /proc/sys/net/ipv4/tcp_timestamps
echo 1 > /proc/sys/net/ipv4/tcp_sack
echo 1 > /proc/sys/net/ipv4/tcp_window_scaling
echo 10 > /proc/sys/net/ipv4/tcp_fin_timeout
| John3/Jx | scripts/deshabilitar_ipv6.sh | Shell | gpl-3.0 | 467 |
echo =============== INSTALLING PYTHON3 ===============
sudo apt-get install -y python3.6 python3.6-dev libssl-dev
if [ $? -ne 0 ]
then
echo apt install failed to run, maybe need root privileges?
exit 1
fi
curl https://bootstrap.pypa.io/get-pip.py | sudo python3.6
if [ $? -ne 0 ]
then
python3.6 -m pip
if [ $? -ne 0 ]
then
echo get-pip install failed to install, maybe need root privileges?
exit 1
fi
fi
echo =============== DONE INSTALLING PYTHON3 ===============
echo =============== INSTALLING pip-vex ===============
python3.6 -m pip install --user vex
if [ $? -ne 0 ]
then
echo failed to install vex
exit 1
fi
echo =============== DONE INSTALLING vex ===============
echo BUILD DEPENDENCIES FINISHED INSTALLING
echo To build, you should now be able to use 'vex -mr foo ./scripts/build.sh'
| purduesigbots/pros-cli | scripts/install_build_dependencies.sh | Shell | mpl-2.0 | 840 |
echo "polling command of devices with same service and subservices, different entity_type"
export HOST_IOT=127.0.0.1:8080
echo "HOST_IOT $HOST_IOT ip and port for iotagent"
export HOST_CB=127.0.0.1:1026
echo "HOST_CB $HOST_CB ip and port for CB (Context Broker)"
export SERVICE=serv22
echo "SERVICE $SERVICE to create device"
export SRVPATH=/srf1
echo "SRVPATH $SRVPATH1 new service_path to create device"
#general functions
trap "exit 1" TERM
export TOP_PID=$$
declare -i number_errors=0
function assert_code()
{
res_code=` echo $res | grep -c "#code:$1"`
if [ $res_code -eq 1 ]
then
echo " OKAY"
else
echo " ERROR: " $2
((number_errors++))
kill -s TERM $TOP_PID
fi
}
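# Usage: assert_code <status> <message>
# Expects $res to contain "#code:<status>", appended by curl's -w option in the calls below.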
function assert_contains()
{
if [[ "$res" =~ .*"$1".* ]]
then
echo " OKAY"
else
echo " ERROR: " $2
((number_errors++))
kill -s TERM $TOP_PID
fi
}
# TEST
echo "create $SERVICE $SRVPATH for ul"
res=$( curl -X POST http://$HOST_IOT/iot/services \
 -i -s -w "#code:%{http_code}#" \
 -H "Content-Type: application/json" \
 -H "Fiware-Service: $SERVICE" \
 -H "Fiware-ServicePath: $SRVPATH" \
 -d '{"services": [{ "apikey": "apikeyul", "token": "tokenul", "cbroker": "http://10.95.213.36:1026", "entity_type": "thingul", "resource": "/iot/d" }]}' )
assert_code 200 "service creation for ul failed"
echo "create $SERVICE $SRVPATH for ul20"
res=$( curl -X POST http://$HOST_IOT/iot/services \
-i \
-H "Content-Type: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" \
-d '{"services": [{ "apikey": "apikeyul20", "token": "tokenul20", "cbroker": "http://10.95.213.36:1026", "entity_type": "thingmqtt", "resource": "/iot/mqtt" }]}' )
echo "create device for ul"
res=$( curl -X POST http://$HOST_IOT/iot/devices \
-i \
-H "Content-Type: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" \
-d '{"devices":[{"device_id":"sensor_ul","protocol":"PDI-IoTA-UltraLight", "commands": [{"name": "PING","type": "command","value": "sensor_ul@command|%s" }]}]}' )
echo "create device for ul20"
res=$( curl -X POST http://$HOST_IOT/iot/devices \
-i \
-H "Content-Type: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" \
-d '{"devices":[{"device_id":"sensor_mqtt","protocol":"PDI-IoTA-MQTT-UltraLight", "commands": [{"name": "PING","type": "command","value": "sensor_mqtt@command|%s" }]}]}' )
echo "check type thingul to iotagent"
res=$( curl -X GET http://$HOST_IOT/iot/devices/sensor_ul \
-i \
-H "Content-Type: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" )
echo "check type thingmqtt to CB"
res=$( curl -X POST http://$HOST_CB/v1/queryContext \
-i \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" \
-d '{"entities": [{ "id": "thingul:sensor_ul", "type": "thingul", "isPattern": "false" }]}' )
echo "comprobar que viene PING"
echo "check type thingul20"
res=$( curl -X GET http://$HOST_IOT/iot/devices/sensor_mqtt \
-i \
-H "Content-Type: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" )
echo "check type thingul20 to CB"
res=$( curl -X POST http://$HOST_CB/v1/queryContext \
-i \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" \
-d '{"entities": [{ "id": "thingmqtt:sensor_mqtt", "type": "thingmqtt", "isPattern": "false" }]}' )
echo "OJO este no tiene PING, pero e registro ha sido bueno"
res=$( curl -X POST http://$HOST_CB/v1/updateContext \
-i -s -w "#code:%{http_code}#" \
-H "Content-Type: application/json" \
-H "Accept: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" \
-d '{"updateAction":"UPDATE","contextElements":[{"id":"thingul:sensor_ul","type":"thingul","isPattern":"false","attributes":[{"name":"PING","type":"command","value":"22","metadatas":[{"name":"TimeInstant","type":"ISO8601","value":"2014-11-23T17:33:36.341305Z"}]}]} ]}' )
echo $res
assert_code 200 "device already exists"
#assert_contains '{"updateAction":"UPDATE","contextElements":[{"id":"thingul:sensor_ul","type":"thingul","isPattern":"false","attributes":[{"name":"PING","type":"command","value":"22","metadatas":[{"name":"TimeInstant","type":"ISO8601","value":"2014-11-23T17:33:36.341305Z"}]}]} ]}'
echo "devuelve ping y 200"
echo "queryContext para ver e PING_status pending"
echo "sleep"
echo "queryContext para ver e PING_status expired_read"
----> ERROR se crea una entidad nueva "id" : "thing:sensor_ul", con "PING_status" : { "expired read" }
echo "150- delete service mqtt"
res=$( curl -X DELETE "http://$HOST_IOT/iot/services?resource=/iot/mqtt&device=true" \
-i -s -w "#code:%{http_code}#" \
-H "Content-Type: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" )
assert_code 204 "device already exists"
echo "160- delete service ul20"
res=$( curl -X DELETE "http://$HOST_IOT/iot/services?resource=/iot/d&device=true" \
-i -s -w "#code:%{http_code}#" \
-H "Content-Type: application/json" \
-H "Fiware-Service: $SERVICE" \
-H "Fiware-ServicePath: $SRVPATH" )
assert_code 204 "device already exists"
| hilgroth/fiware-IoTAgent-Cplusplus | scripts/polling_same_service_subservice.sh | Shell | agpl-3.0 | 5,161 |
#!/bin/bash
# Expected parameters
# $1 exec_run
# $2 para_run
# $3 reference output
# $4 test output
eval "$1 $2" 2>&1 | tee screen.log
rtcode="${PIPESTATUS[0]}"
echo $rtcode > exec.exit
echo -e "\n\n"
if [ -z "`grep "Expected to abort!" $3`" ];then
# This case is expected to run normally
if [ "$rtcode" != "0" ]; then
# CMake will clear the target output if the command exits with a non-zero status
cp $4 "$4".save
fi
exit $rtcode
else
# This case is expected to fail
if [ "$rtcode" == "0" ]; then
# Success is unexpected.
echo "EXEC: Program finished normally but failure is expected!"
echo ""
cp $4 "$4".surprise
exit 1
else
echo "EXEC: Program aborted as expected."
echo ""
echo "Expected to abort!" >> $4
# Then continue to compare output
exit 0
fi
fi
| QiaoLei-88/NSolver | utilities/script/regTestDriver.sh | Shell | lgpl-2.1 | 873 |
#!/bin/bash
cd /home/netarkiv/test/conf/
echo Starting all applications on: 'sb-test-acs-001.statsbiblioteket.dk'
if [ -e ./start_ViewerProxyApplication.sh ]; then
./start_ViewerProxyApplication.sh
fi
| netarchivesuite/netarchivesuite-svngit-migration | tests/dk/netarkivet/deploy/data/originals/test_target/sb-test-acs-001.statsbiblioteket.dk/startall.sh | Shell | lgpl-2.1 | 208 |
if [ "$CI_PULL_REQUEST" = "9389" ] || [ "$CI_BRANCH" = "set-implicits" ]; then
equations_CI_REF=set-implicits
equations_CI_GITURL=https://github.com/SkySkimmer/Coq-Equations
mtac2_CI_REF=set-implicits
mtac2_CI_GITURL=https://github.com/SkySkimmer/Mtac2
fi
| ppedrot/coq | dev/ci/user-overlays/09389-SkySkimmer-set-implicits.sh | Shell | lgpl-2.1 | 275 |
#!/bin/bash
set -e -o pipefail
. ../../../test_common.sh
test_init test_api_xccdf_unittests.log
#
# API C Tests
#
test_run "xccdf:complex-check -- NAND is working properly" ./test_xccdf_shall_pass $srcdir/test_xccdf_complex_check_nand.xccdf.xml
test_run "xccdf:complex-check -- single negation" ./test_xccdf_shall_pass $srcdir/test_xccdf_complex_check_single_negate.xccdf.xml
test_run "Certain id's of xccdf_items may overlap" ./test_xccdf_shall_pass $srcdir/test_xccdf_overlaping_IDs.xccdf.xml
test_run "Test Abstract data types." ./test_oscap_common
test_run "xccdf_rule_result_override" $srcdir/test_xccdf_overrides.sh
test_run "Assert for environment" [ ! -x $srcdir/not_executable ]
test_run "Assert for environment better" $OSCAP oval eval --id oval:moc.elpmaxe.www:def:1 $srcdir/test_xccdf_check_content_ref_without_name_attr.oval.xml
#
# General XCCDF Tests. (Mostly, oscap xccdf eval)
#
test_run "Fix containing unresolved elements" $srcdir/test_remediate_unresolved.sh
test_run "Empty XCCDF variable element" $srcdir/test_empty_variable.sh
test_run "Test xccdf:fix/xccdf:instance elements" $srcdir/test_fix_instance.sh
test_run "Escaping of xml & within xccdf:value" $srcdir/test_xccdf_xml_escaping_value.sh
test_run "check/@negate" $srcdir/test_xccdf_check_negate.sh
test_run "check/@multi-check import/export" $srcdir/test_xccdf_check_multi_check.sh
test_run "check/@multi-check simple" $srcdir/test_xccdf_check_multi_check2.sh
test_run "check/@multi-check that has zero definitions" $srcdir/test_xccdf_check_multi_check_zero_definitions.sh
test_run "xccdf:check-content-ref without @name" $srcdir/test_xccdf_check_content_ref_without_name_attr.sh
test_run "without xccdf:check-content-refs" $srcdir/test_xccdf_check_without_content_refs.sh
test_run "xccdf:refine-rule/@weight shall not be exported" $srcdir/test_xccdf_refine_rule.sh
test_run "xccdf:refine-rule shall refine rules" $srcdir/test_xccdf_refine_rule_refine.sh
test_run "xccdf:fix/@distruption|@complexity shall not be exported" $srcdir/test_xccdf_fix_attr_export.sh
test_run "xccdf:complex-check/@operator=AND -- notchecked" $srcdir/test_xccdf_complex_check_and_notchecked.sh
test_run "Check Processing Algorithm -- complex-check priority" $srcdir/test_xccdf_check_processing_complex_priority.sh
test_run "Check Processing Algorithm -- bad refine must select check without @selector" $srcdir/test_xccdf_check_processing_selector_bad.sh
test_run "Check Processing Algorithm -- none selected for candidate" $srcdir/test_xccdf_check_processing_selector_empty.sh
test_run "Check Processing Algorithm -- none check-content-ref resolvable." $srcdir/test_xccdf_check_processing_invalid_content_refs.sh
test_run "Check Processing Algorithm -- always include xccdf:check" $srcdir/test_xccdf_notchecked_has_check.sh
test_run "xccdf:select and @cluster-id -- disable group" $srcdir/test_xccdf_selectors_cluster1.sh
test_run "xccdf:select and @cluster-id -- enable a set of items" $srcdir/test_xccdf_selectors_cluster2.sh
test_run "xccdf:select and @cluster-id -- complex example" $srcdir/test_xccdf_selectors_cluster3.sh
test_run "Deriving XCCDF Check Results from OVAL Definition Results" $srcdir/test_deriving_xccdf_result_from_oval.sh
test_run "Deriving XCCDF Check Results from OVAL Definition Results 2" $srcdir/test_deriving_xccdf_result_from_oval2.sh
test_run "Deriving XCCDF Check Results from OVAL without definition." $srcdir/test_oval_without_definition.sh
test_run "Deriving XCCDF Check Results from OVAL Definition Results + multi-check" $srcdir/test_deriving_xccdf_result_from_oval_multicheck.sh
test_run "Multiple oval files with the same basename." $srcdir/test_multiple_oval_files_with_same_basename.sh
test_run "Unsupported Check System" $srcdir/test_xccdf_check_unsupported_check_system.sh
test_run "Multiple xccdf:TestResult elements" $srcdir/test_xccdf_multiple_testresults.sh
test_run "default selector for xccdf value" $srcdir/test_default_selector.sh
test_run "inherit selector for xccdf value" $srcdir/test_inherit_selector.sh
test_run "incorrect selector for xccdf value" $srcdir/test_xccdf_refine_value_bad.sh
test_run "Exported arf results from xccdf without reference to oval" $srcdir/test_xccdf_results_arf_no_oval.sh
test_run "XCCDF Substitute within Title" $srcdir/test_xccdf_sub_title.sh
test_run "libxml errors handled correctly" $srcdir/test_unfinished.sh
#
# Tests for 'oscap xccdf eval --remediate' and substitution
#
test_run "XCCDF Remediation Simple Test" $srcdir/test_remediation_simple.sh
test_run "XCCDF Remediation Bad Fix Fails to Remedy" $srcdir/test_remediation_bad_fix.sh
test_run "XCCDF Remediation Substitute Simple plain-text" $srcdir/test_remediation_subs_plain_text.sh
test_run "XCCDF Remediation Substitute Empty plain-text" $srcdir/test_remediation_subs_plain_text_empty.sh
test_run "XCCDF Remediation Substitute Value by refine-value" $srcdir/test_remediation_subs_value_refine_value.sh
test_run "XCCDF Remediation Substitute Value by first value" $srcdir/test_remediation_subs_value_take_first.sh
test_run "XCCDF Remediation Substitute Value by empty selector" $srcdir/test_remediation_subs_value_without_selector.sh
test_run "XCCDF Remediation Substitute Value by its title" $srcdir/test_remediation_subs_value_title.sh
test_run "XCCDF Remediation & decoding" $srcdir/test_remediation_amp_escaping.sh
test_run "XCCDF Remediation bypass XML Comments" $srcdir/test_remediation_xml_comments.sh
test_run "XCCDF Remediation understands <[CDATA[." $srcdir/test_remediation_cdata.sh
test_run "XCCDF Remediation Aborts on unresolved element." $srcdir/test_remediation_subs_unresolved.sh
test_run "XCCDF Remediation requires fix/@system attribute" $srcdir/test_remediation_fix_without_system.sh
#
# Tests for 'oscap xccdf remediate'
#
test_run "XCCDF Remediate" $srcdir/test_remediate_simple.sh
test_run "XCCDF Remediate + python fix" $srcdir/test_remediate_python.sh
test_run "XCCDF Remediate + python fix + xhtml:object" $srcdir/test_remediate_python_subs.sh
test_run "XCCDF Remediate + perl fix" $srcdir/test_remediate_perl.sh
#
# Tests for XCCDF report
#
test_run 'generate report: xccdf:check/@selector=""' $srcdir/test_report_check_with_empty_selector.sh
test_run "generate report: missing xsl shall not segfault" $srcdir/test_report_without_xsl_fails_gracefully.sh
test_run "generate report: avoid warnings from libxml" $srcdir/test_report_without_oval_poses_no_errors.sh
test_run "generate fix: just as the anaconda does" $srcdir/test_report_anaconda_fixes.sh
test_run "generate fix: just as the anaconda does + DataStream" $srcdir/test_report_anaconda_fixes_ds.sh
test_run "generate fix: ensure filtering drop fixes" $srcdir/test_fix_filtering.sh
test_exit
| postfix/openscap | tests/API/XCCDF/unittests/all.sh | Shell | lgpl-2.1 | 6,695 |
echo 'Please wait...'
sed -ri 's/^[ \t]*(<\/?category)/ \1/' $@
sed -ri 's/^[ \t]*(<\/?(rulegroup|!DOCTYPE))/ \1/' $@
sed -ri 's/^[ \t]*(<\/?(rule |rule>|!--|!ENTITY))/ \1/' $@
sed -ri 's/^[ \t]*(<\/?(marker|suggestion|equivalence|and|or))/ \1/' $@
sed -ri 's/^[ \t]*(<\/?(antipattern|pattern|regexp|message|url|short|example|disambig|unification))/ \1/' $@
sed -ri 's/^[ \t]*(<\/?(token|unify))/ \1/' $@
sed -ri 's/^[ \t]*(<\/?(exception|feature))/ \1/' $@
echo $@' indented'
| meg0man/languagetool | languagetool-language-modules/pt/src/main/resources/org/languagetool/rules/pt/indent.sh | Shell | lgpl-2.1 | 515 |
#!/bin/bash
if [ -d "/etc/apache2" ]
then
echo "STARTING APACHE WEBSERVER"
NAME=`hostname`
BASEFOLDER=/vagrant/machines/$NAME/www/
DOCROOT=$BASEFOLDER/docroot
sudo a2ensite default-ssl.conf
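# The sed calls below inject an AllowOverride block into each vhost and enable Magento developer mode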
sudo sed -i 's%</VirtualHost>%<Directory "/var/www/html">\nAllowOverride All\n </Directory>\nSETENV MAGE_IS_DEVELOPER_MODE true\n</VirtualHost>%g' /etc/apache2/sites-available/default-ssl.conf
sudo sed -i 's%</VirtualHost>%<Directory "/var/www/html">\nAllowOverride All\n </Directory>\nSETENV MAGE_IS_DEVELOPER_MODE true\n</VirtualHost>%g' /etc/apache2/sites-available/000-default.conf
sudo sed -i 's%;always_populate_raw_post_data = -1%always_populate_raw_post_data = -1%g' /etc/php5/apache2/php.ini
sudo rm -rf /var/www/html
sudo ln -s $DOCROOT /var/www/html
sudo a2enmod rewrite
sudo a2enmod headers
sudo a2enmod ssl
if [ ! -f $DOCROOT/.htaccess ]; then
ln -s $BASEFOLDER/.htaccess.dev $DOCROOT/.htaccess
fi
sudo service apache2 restart
fi | ProxiBlue/vagrant-docker | provision/start-apache-web.sh | Shell | lgpl-3.0 | 1,002 |
#!/sbin/sh
# @(#) $Id: get_bootdisk.sh,v 6.10.1.1 2013-09-12 16:13:15 ralph Exp $
# by Thomas Brix - works only with HPUX 11i v2 and above!!!
# not used in cfg2html yet!
lssf $(ls -l /dev/d*sk/* | grep $(echo "bootdev/x" | \
adb /stand/vmunix /dev/kmem | grep 0x | \
sed 's/0x..//') | head -1 | awk '{print $NF}') | \
awk '{print "This system last booted from " $NF " " $(NF-1)}'
| rossonet/templateAr4k | cfg2html-master/hpux/plugins/get_bootdisk.sh | Shell | lgpl-3.0 | 381 |
#!/bin/sh
#
# Copyright (C) 2013-2014 the original author or authors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License,
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
sudo apt-get update
sudo aptitude install -y build-essential
sudo apt-get install iperf -y
wget -qO- http://downloads.es.net/pub/iperf/iperf-3.0.1.tar.gz | tar xzvf -
cd iperf-3.0.1
./configure
make && sudo make install
cd ../
rm -rf iperf-3.0.1 | alessandroleite/dohko | services/src/main/resources/org/excalibur/service/deployment/resource/script/02-iperf3.sh | Shell | lgpl-3.0 | 981 |