code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/bash
# Install the `session` helper and its bashrc component.
#
# Environment overrides:
#   PREFIX - install prefix for the binary (default: $HOME)
#   ETC    - directory holding the bashrc and .bash.d (default: $HOME)
#   BASHRC - bashrc filename (default: bash.bashrc when ETC is /etc,
#            otherwise .bashrc)
: "${PREFIX:=$HOME}"
: "${ETC:=$HOME}"
if [ "${ETC%/}" = /etc ]; then
  : "${BASHRC:=bash.bashrc}"
else
  : "${BASHRC:=.bashrc}"
fi
# Ensure the destination directories exist (original assumed both).
mkdir -p "$PREFIX/bin" "$ETC/.bash.d"
cp session "$PREFIX/bin/session"
# Append the component loader exactly once, keeping a .bak backup.
# NOTE: `\$component` must stay escaped so it is expanded when the bashrc
# runs, not at install time (the original's unescaped `$component` baked an
# empty string into the appended line, breaking the loader).
if ! grep -q '### LOAD COMPONENTS$' "$ETC/$BASHRC"; then
  sed -i.bak "\$afor component in \$(ls $ETC/.bash.d); do source $ETC/.bash.d/\$component; done ### LOAD COMPONENTS" "$ETC/$BASHRC"
fi
cp session.bashrc "$ETC/.bash.d"
|
dlthomas/sessions
|
install.sh
|
Shell
|
mit
| 441 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2014:0453
#
# Security announcement date: 2014-04-30 19:09:25 UTC
# Script generation date: 2016-05-12 18:11:56 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - xalan-j2-eap6.noarch:2.7.1-9.redhat_7.1.ep6.el5
#
# Last versions recommended by security team:
# - xalan-j2-eap6.noarch:2.7.1-9.redhat_7.1.ep6.el5
#
# CVE List:
# - CVE-2014-0107
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
#
# NOTE(review): "name.arch-version" is not standard yum syntax; yum expects
# "name-version" or "name.arch". Confirm this generated spec resolves.
sudo yum install xalan-j2-eap6.noarch-2.7.1 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2014/RHSA-2014:0453.sh
|
Shell
|
mit
| 646 |
# Fetch the android-openssl helper scripts used to build OpenSSL for Android.
# NOTE(review): no branch/tag or --depth pinning — the clone tracks upstream
# HEAD; confirm whether a fixed revision should be used for reproducibility.
git clone https://github.com/paulo-raca/android-openssl
|
hbirchtree/coffeecutie-meta
|
instrumentation/openssl/compile-android.sh
|
Shell
|
mit
| 56 |
# bash-toolbox "@class" declaration: MathUtil acts as a dispatcher — calling
# `MathUtil <method> <args...>` invokes the matching nested function via the
# trailing `$@` line. Integer arithmetic only.
@class
MathUtil(){
# decrement N -> N-1
decrement(){
difference ${1} 1
}
# difference A B -> A-B
difference(){
echo $((${1}-${2}))
}
# format N -> zero-pads single-digit values to two digits ("7" -> "07")
format(){
if [[ ${1} -le 9 ]]; then
echo 0${1}
else
echo ${1}
fi
}
# increment N -> N+1; with no argument yields 1
increment(){
if [[ ! ${1} ]]; then
echo $((+1))
else
sum ${1} 1
fi
}
# isDivisible A B -> prints "true" when A % B == 0, prints nothing otherwise
isDivisible(){
if [[ $(modulus ${1} ${2}) == 0 ]]; then
echo true
fi
}
# isEven N -> "true" when N is even
isEven(){
isDivisible ${1} 2
}
# isOdd N -> "true" when N is odd (empty isEven output means odd)
isOdd(){
if [[ ! $(isEven ${1}) ]]; then
echo true
fi
}
# modulus A B -> A % B. Leading zeros are stripped so values like "08" are
# not treated as invalid octal. NOTE(review): ${var//0/} removes ALL zeros
# (e.g. "10" -> "1"), not just leading ones — confirm intended behavior.
modulus(){
local var=${1}
if [[ ${var} == 0* ]]; then
local var=${var//0/}
fi
echo $((${var}%${2}))
}
# exp BASE EXPONENT -> BASE**EXPONENT
exp(){
local base=${1}
local exponent=${2}
echo $((${1}**${2}))
}
# product N... -> product of all arguments (1 when none given)
product(){
local _product=1
while [[ $@ ]]; do
local _product=$((_product*${1}))
shift
done
echo ${_product}
}
# quotient A B -> integer division A/B
quotient(){
echo $((${1}/${2}))
}
# sum N... -> sum of all arguments (0 when none given)
sum(){
local _sum=0
while [[ $@ ]]; do
local _sum=$((_sum+${1}))
shift
done
echo ${_sum}
}
# Dispatch: first argument selects the method, the rest are its arguments.
$@
}
|
anthony-chu/bash-toolbox
|
math/util/MathUtil.sh
|
Shell
|
mit
| 927 |
#!/usr/bin/env bash
# vim: ai ts=2 sw=2 et sts=2 ft=sh
# Install or re-install phpenv with
# multiple versions of PHP on macOS.
#
# Usage:
#
#   curl -L https://git.io/JvG7i | bash
#
# Bash strict mode.
set -o pipefail
set -o errexit
set -o nounset
# Allow empty globs.
shopt -s nullglob
# Split words only on spaces (not tabs/newlines) for the loops below.
IFS=$' '
# Check OS.
# Strip version digits/dots from OSTYPE (e.g. "darwin19.0") before comparing.
if [[ "${OSTYPE//[0-9.]/}" != "darwin" ]]; then
(>&2 echo "Error: This script is for macOS not '${OSTYPE}'.")
exit 1;
fi
# Install Homebrew when the `brew` command is not already on PATH.
brew_install() {
# Install homebrew.
if ! command -v brew 1>/dev/null; then
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
}
# Install PHP 7.2–8.0 from Homebrew, write a UTC timezone ini for each,
# attempt common extensions, then unlink so versions don't collide on PATH.
php_install() {
local PHP_VER;
local PHP_EXT;
# Install PHP versions.
for PHP_VER in "7.2" "7.3" "7.4" "8.0"; do
# Do not overwrite existing php install.
if brew list --versions "php@${PHP_VER}" 1>/dev/null; then
(>&2 echo "PHP ${PHP_VER} already installed.")
# todo: backup global composer config and update php version.
continue
fi
brew install "php@${PHP_VER}" || true
echo "date.timezone = UTC" > "$(brew --prefix)/etc/php/${PHP_VER}/conf.d/date.ini"
# Install PHP extensions.
IFS=$' '
for PHP_EXT in "opcache" "xdebug" "yaml"; do
# Formula names drop the dot, e.g. "php74-xdebug"; failures are non-fatal.
brew install "php${PHP_VER/./}-${PHP_EXT}" 2>/dev/null || true
done;
# Cleaning up.
brew unlink "php@${PHP_VER}" || true
done;
}
# Install (or update) phpenv itself, append init lines to ~/.extra, and
# build PHP 7.1.x from source against Homebrew library paths.
phpenv_install() {
# Install phpenv.
export PHPENV_ROOT="${HOME}/.phpenv"
# shellcheck disable=SC2016
if ! command -v phpenv 1>/dev/null; then
( curl -fsSL https://raw.githubusercontent.com/phpenv/phpenv-installer/master/bin/phpenv-installer | bash ) || true
# Append shell-init lines verbatim (single quotes keep them unexpanded).
{ echo 'export PHPENV_ROOT="${HOME}/.phpenv"'
echo 'if [[ -d "${PHPENV_ROOT}" ]]; then'
echo ' export PATH="${PHPENV_ROOT}/bin:${PATH}";'
echo ' eval "$(phpenv init -)";'
echo 'fi'
} >> "${HOME}/.extra"
export PATH="${PHPENV_ROOT}/bin:${PATH}"
eval "$(phpenv init -)"
else
(>&2 echo "Updating phpenv.")
phpenv update
fi
#install 7.1.x from source
brew install autoconf bison bzip2 curl icu4c libedit libjpeg libiconv libpng libxml2 libzip openssl re2c tidy-html5 zlib mcrypt
CFLAGS=-DU_DEFINE_FALSE_AND_TRUE=1 CONFIGURE_OPTS="--with-zlib-dir=$(brew --prefix zlib) --with-bz2=$(brew --prefix bzip2) --with-curl=$(brew --prefix curl) --with-iconv=$(brew --prefix libiconv) --with-libedit=$(brew --prefix libedit) --with-readline=$(brew --prefix readline) --with-tidy=$(brew --prefix tidy-html5)" phpenv install 7.1.9
}
# Remove phpenv version symlinks that point into Homebrew's Cellar; they
# are recreated fresh by phpenv_versions_rehash.
phpenv_versions_cleanup() {
local _shim_link;
local _shim_realpath;
if [[ ! -d "${HOME}/.phpenv/versions" ]]; then
mkdir -p "${HOME}/.phpenv/versions"
fi
for _shim_link in "${HOME}"/.phpenv/versions/[0-9].[0-9]*/; do
# Resolve the symlink to its physical target directory.
_shim_realpath="$(cd -P "$_shim_link" && pwd)"
if [[ "$_shim_realpath" == "$(brew --cellar)"* ]]; then
unlink "$_shim_link" 2>/dev/null || true
fi
done
}
# Symlink every brewed PHP (Cellar/php*/X.Y.Z) into phpenv's versions
# directory under its X.Y name, then rebuild phpenv's shims.
phpenv_versions_rehash() {
local _php_path;
local _php_full_ver;
local _php_version;
if [[ ! -d "${HOME}/.phpenv/versions" ]]; then
mkdir -p "${HOME}/.phpenv/versions"
fi
for _php_path in "$(brew --cellar)"/php*/[0-9].[0-9].*; do
_php_full_ver="${_php_path##*/}";
# Drop the patch component: 7.4.10 -> 7.4.
_php_version="${_php_full_ver%.*}";
unlink "${HOME}/.phpenv/versions/${_php_version}" 2>/dev/null || true
ln -s "${_php_path}" "${HOME}/.phpenv/versions/${_php_version}" 2>/dev/null || true
done
phpenv rehash
}
# Run the installation pipeline: Homebrew, PHP versions, phpenv itself,
# then re-link the brewed PHPs into phpenv's versions directory.
brew_install
php_install
phpenv_install
phpenv_versions_cleanup
phpenv_versions_rehash
|
troyxmccall/dotfiles
|
phpenv-macos.sh
|
Shell
|
mit
| 3,540 |
#!/bin/sh
# Probe the power-management tunables this machine (ThinkPad E130 layout)
# exposes: print each sysfs/procfs path that exists; `ls` reports an error
# for the ones the running kernel does not provide.
for sysfs_node in \
  '/proc/sys/vm/dirty_writeback_centisecs' \
  '/sys/class/scsi_host/host2/link_power_management_policy' \
  '/sys/class/scsi_host/host0/link_power_management_policy' \
  '/sys/module/snd_hda_intel/parameters/power_save' \
  '/sys/class/scsi_host/host1/link_power_management_policy' \
  '/sys/class/scsi_host/host3/link_power_management_policy' \
  '/sys/class/scsi_host/host4/link_power_management_policy' \
  '/sys/class/scsi_host/host5/link_power_management_policy' \
  '/proc/sys/kernel/nmi_watchdog' \
  '/sys/bus/pci/devices/0000:00:1c.5/power/control' \
  '/sys/bus/pci/devices/0000:00:00.0/power/control' \
  '/sys/bus/pci/devices/0000:00:02.0/power/control' \
  '/sys/bus/pci/devices/0000:00:14.0/power/control' \
  '/sys/bus/pci/devices/0000:00:1c.1/power/control' \
  '/sys/bus/pci/devices/0000:00:16.0/power/control' \
  '/sys/bus/pci/devices/0000:00:1a.0/power/control' \
  '/sys/bus/pci/devices/0000:00:1b.0/power/control' \
  '/sys/bus/pci/devices/0000:00:1c.2/power/control' \
  '/sys/bus/pci/devices/0000:00:1f.0/power/control' \
  '/sys/bus/pci/devices/0000:00:1f.2/power/control' \
  '/sys/bus/pci/devices/0000:00:1f.3/power/control' \
  '/sys/bus/pci/devices/0000:03:00.0/power/control' \
  '/sys/bus/pci/devices/0000:04:00.0/power/control' \
  '/sys/bus/pci/devices/0000:09:00.0/power/control' \
  '/sys/bus/pci/devices/0000:00:1c.0/power/control' \
  '/sys/bus/pci/devices/0000:00:1d.0/power/control'
do
  ls "$sysfs_node"
done
|
nobiruwa/home-directory
|
sbin/detect-sysfs-e130.sh
|
Shell
|
mit
| 1,436 |
#!/bin/sh
# Regenerate the autotools build system from scratch: clean previous
# artifacts, re-run the autotools chain, and prepare an out-of-tree build
# directory.
./cleanup.sh
#set -e
aclocal
automake --add-missing
autoconf
# -p makes re-running the bootstrap idempotent; the original plain
# `mkdir build` failed when the directory already existed.
mkdir -p build
|
ttroy50/vsid
|
bootstrap.sh
|
Shell
|
mit
| 75 |
# Node Version Manager
# Implemented as a POSIX-compliant function
# Should work on sh, dash, bash, ksh, zsh
# To use source this file from your bash profile
#
# Implemented by Tim Caswell <[email protected]>
# with much bash help from Matthew Ranney
{ # this ensures the entire script is downloaded #
NVM_SCRIPT_SOURCE="$_"
# Succeeds when $1 resolves to any runnable command (binary, builtin,
# function, or alias) in the current shell.
nvm_has() {
type "$1" >/dev/null 2>&1
}
# Succeeds when $1 is defined as a shell alias in the current shell.
nvm_is_alias() {
# this is intentionally not "command alias" so it works in zsh.
\alias "$1" > /dev/null 2>&1
}
# Resolve the newest nvm release tag by following http://latest.nvm.sh's
# redirect and printing the last path component of the final URL.
# Returns 1 when neither curl nor wget is available, 2 when the redirect
# produced no URL.
nvm_get_latest() {
local NVM_LATEST_URL
if nvm_has "curl"; then
NVM_LATEST_URL="$(curl -q -w "%{url_effective}\n" -L -s -S http://latest.nvm.sh -o /dev/null)"
elif nvm_has "wget"; then
# wget has no url_effective: scrape the final Location header instead.
NVM_LATEST_URL="$(wget http://latest.nvm.sh --server-response -O /dev/null 2>&1 | command awk '/^ Location: /{DEST=$2} END{ print DEST }')"
else
>&2 echo 'nvm needs curl or wget to proceed.'
return 1
fi
if [ "_$NVM_LATEST_URL" = "_" ]; then
>&2 echo "http://latest.nvm.sh did not redirect to the latest release on Github"
return 2
else
# The tag name is the last '/'-separated field of the redirect target.
echo "$NVM_LATEST_URL" | command awk -F '/' '{print $NF}'
fi
}
# Download wrapper: callers pass curl-style arguments; falls back to wget
# with the common curl flags translated.
nvm_download() {
if nvm_has "curl"; then
# NOTE(review): $* is intentionally unquoted so multiple flags split into
# words, but this breaks on arguments containing spaces (SC2086).
curl -q $*
elif nvm_has "wget"; then
# Emulate curl with wget
ARGS=$(echo "$*" | command sed -e 's/--progress-bar /--progress=bar /' \
-e 's/-L //' \
-e 's/-I /--server-response /' \
-e 's/-s /-q /' \
-e 's/-o /-O /' \
-e 's/-C - /-c /')
# NOTE(review): eval re-splits the translated flag string; unsafe if URLs
# ever contain shell metacharacters — confirm inputs are trusted.
eval wget $ARGS
fi
}
# True when a node binary remains visible after nvm's PATH entries are
# deactivated, i.e. a system-wide node exists.
nvm_has_system_node() {
[ "$(nvm deactivate >/dev/null 2>&1 && command -v node)" != '' ]
}
# Same check for a system-wide io.js binary.
nvm_has_system_iojs() {
[ "$(nvm deactivate >/dev/null 2>&1 && command -v iojs)" != '' ]
}
# Print " (npm vX.Y.Z)" for the active npm, or nothing when npm is absent.
nvm_print_npm_version() {
if nvm_has "npm"; then
echo " (npm v$(npm --version 2>/dev/null))"
fi
}
# Make zsh glob matching behave same as bash
# This fixes the "zsh: no matches found" errors
if [ -z "${NVM_CD_FLAGS-}" ]; then
export NVM_CD_FLAGS=''
fi
# `unsetopt` only exists in zsh, so its presence detects a zsh host shell.
if nvm_has "unsetopt"; then
unsetopt nomatch 2>/dev/null
# -q keeps zsh's cd quiet when resolving NVM_DIR below.
NVM_CD_FLAGS="-q"
fi
# Auto detect the NVM_DIR when not set
if [ -z "${NVM_DIR-}" ]; then
# BASH_SOURCE is more reliable than $_ when this file is sourced by bash.
if [ -n "$BASH_SOURCE" ]; then
NVM_SCRIPT_SOURCE="${BASH_SOURCE[0]}"
fi
NVM_DIR="$(cd $NVM_CD_FLAGS "$(dirname "${NVM_SCRIPT_SOURCE:-$0}")" > /dev/null && \pwd)"
export NVM_DIR
fi
unset NVM_SCRIPT_SOURCE 2> /dev/null
# Setup mirror location if not already set
if [ -z "${NVM_NODEJS_ORG_MIRROR-}" ]; then
export NVM_NODEJS_ORG_MIRROR="https://nodejs.org/dist"
fi
if [ -z "${NVM_IOJS_ORG_MIRROR-}" ]; then
export NVM_IOJS_ORG_MIRROR="https://iojs.org/dist"
fi
# Succeeds when path $2 lives somewhere underneath directory tree $1.
# Walks $2 upward with dirname until reaching the tree root, "/", or ".".
# Returns 2 (with a diagnostic) when either argument is empty.
nvm_tree_contains_path() {
local tree
tree="$1"
local candidate
candidate="$2"
if [ "@$tree@" = "@@" ] || [ "@$candidate@" = "@@" ]; then
>&2 echo "both the tree and the node path are required"
return 2
fi
local walker
walker=$(dirname "$candidate")
until [ "$walker" = "" ] || [ "$walker" = "." ] || [ "$walker" = "/" ] || [ "$walker" = "$tree" ]; do
walker=$(dirname "$walker")
done
[ "$walker" = "$tree" ]
}
# Traverse up in directory tree to find containing folder
# Walk upward from $PWD looking for a directory containing file $1.
# Prints that directory, or the empty string when the filesystem root is
# passed without a match.
nvm_find_up() {
local dir
dir=$PWD
until [ "$dir" = "" ] || [ -f "$dir/$1" ]; do
dir=${dir%/*}
done
echo "$dir"
}
# Print the path of the nearest .nvmrc at or above $PWD; print nothing
# when no such file exists.
nvm_find_nvmrc() {
local found_dir
found_dir="$(nvm_find_up '.nvmrc')"
if [ -e "$found_dir/.nvmrc" ]; then
echo "$found_dir/.nvmrc"
fi
}
# Obtain nvm version from rc file
# Load the first line of the nearest .nvmrc into the exported
# NVM_RC_VERSION variable; prints a confirmation on success and returns 1
# (with a stderr message) when no .nvmrc is found.
nvm_rc_version() {
export NVM_RC_VERSION=''
local NVMRC_PATH
NVMRC_PATH="$(nvm_find_nvmrc)"
if [ -e "$NVMRC_PATH" ]; then
# `|| printf ''` keeps a file lacking a trailing newline from failing read.
read -r NVM_RC_VERSION < "$NVMRC_PATH" || printf ''
echo "Found '$NVMRC_PATH' with version <$NVM_RC_VERSION>"
else
>&2 echo "No .nvmrc file found"
return 1
fi
}
# Numeric version comparisons: both sides pass through
# nvm_normalize_version before the integer test.
nvm_version_greater() {
local left
left="$(nvm_normalize_version "$1")"
local right
right="$(nvm_normalize_version "$2")"
[ "$left" -gt "$right" ]
}
nvm_version_greater_than_or_equal_to() {
local left
left="$(nvm_normalize_version "$1")"
local right
right="$(nvm_normalize_version "$2")"
[ "$left" -ge "$right" ]
}
# Map a directory selector onto a path: "" or "new" -> versions/node,
# "iojs" -> versions/io.js, "old" -> $NVM_DIR itself. Unknown selectors
# print a diagnostic and return 3.
nvm_version_dir() {
local which_dir
which_dir="$1"
case "_${which_dir}" in
_ | _new)
echo "$NVM_DIR/versions/node"
;;
_iojs)
echo "$NVM_DIR/versions/io.js"
;;
_old)
echo "$NVM_DIR"
;;
*)
echo "unknown version dir" >&2
return 3
;;
esac
}
# Location of the alias directory (kept under the legacy "old" root).
nvm_alias_path() {
printf '%s/alias\n' "$(nvm_version_dir old)"
}
# Resolve the installation directory for a version string:
#  - iojs-prefixed versions live under versions/io.js
#  - node older than 0.12.0 lives in the legacy root
#  - everything else lives under versions/node
# Returns 3 when no version is supplied.
nvm_version_path() {
local version
version="$1"
if [ -z "$version" ]; then
echo "version is required" >&2
return 3
fi
if nvm_is_iojs_version "$version"; then
echo "$(nvm_version_dir iojs)/$(nvm_strip_iojs_prefix "$version")"
elif nvm_version_greater 0.12.0 "$version"; then
echo "$(nvm_version_dir old)/$version"
else
echo "$(nvm_version_dir new)/$version"
fi
}
nvm_ensure_version_installed() {
local PROVIDED_VERSION
PROVIDED_VERSION="$1"
local LOCAL_VERSION
local EXIT_CODE
LOCAL_VERSION="$(nvm_version "$PROVIDED_VERSION")"
EXIT_CODE="$?"
local NVM_VERSION_DIR
if [ "_$EXIT_CODE" = "_0" ]; then
NVM_VERSION_DIR="$(nvm_version_path "$LOCAL_VERSION")"
fi
if [ "_$EXIT_CODE" != "_0" ] || [ ! -d "$NVM_VERSION_DIR" ]; then
VERSION="$(nvm_resolve_alias "$PROVIDED_VERSION")"
if [ $? -eq 0 ]; then
echo "N/A: version \"$PROVIDED_VERSION -> $VERSION\" is not yet installed" >&2
else
local PREFIXED_VERSION
PREFIXED_VERSION="$(nvm_ensure_version_prefix "$PROVIDED_VERSION")"
echo "N/A: version \"${PREFIXED_VERSION:-$PROVIDED_VERSION}\" is not yet installed" >&2
fi
return 1
fi
}
# Expand a version using the version cache
nvm_version() {
local PATTERN
PATTERN="$1"
local VERSION
# The default version is the current one
if [ -z "$PATTERN" ]; then
PATTERN='current'
fi
if [ "$PATTERN" = "current" ]; then
nvm_ls_current
return $?
fi
local NVM_NODE_PREFIX
NVM_NODE_PREFIX="$(nvm_node_prefix)"
case "_$PATTERN" in
"_$NVM_NODE_PREFIX" | "_$NVM_NODE_PREFIX-")
PATTERN="stable"
;;
esac
VERSION="$(nvm_ls "$PATTERN" | command tail -n1)"
if [ -z "$VERSION" ] || [ "_$VERSION" = "_N/A" ]; then
echo "N/A"
return 3;
else
echo "$VERSION"
fi
}
nvm_remote_version() {
local PATTERN
PATTERN="$1"
local VERSION
if nvm_validate_implicit_alias "$PATTERN" 2> /dev/null ; then
case "_$PATTERN" in
"_$(nvm_iojs_prefix)")
VERSION="$(nvm_ls_remote_iojs | command tail -n1)"
;;
*)
VERSION="$(nvm_ls_remote "$PATTERN")"
;;
esac
else
VERSION="$(nvm_remote_versions "$PATTERN" | command tail -n1)"
fi
echo "$VERSION"
if [ "_$VERSION" = '_N/A' ]; then
return 3
fi
}
nvm_remote_versions() {
local NVM_IOJS_PREFIX
NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
local PATTERN
PATTERN="$1"
case "_$PATTERN" in
"_$NVM_IOJS_PREFIX" | "_io.js")
VERSIONS="$(nvm_ls_remote_iojs)"
;;
"_$(nvm_node_prefix)")
VERSIONS="$(nvm_ls_remote)"
;;
*)
if nvm_validate_implicit_alias "$PATTERN" 2> /dev/null ; then
echo >&2 "Implicit aliases are not supported in nvm_remote_versions."
return 1
fi
VERSIONS="$(echo "$(nvm_ls_remote "$PATTERN")
$(nvm_ls_remote_iojs "$PATTERN")" | command grep -v "N/A" | command sed '/^$/d')"
;;
esac
if [ -z "$VERSIONS" ]; then
echo "N/A"
return 3
else
echo "$VERSIONS"
fi
}
nvm_is_valid_version() {
if nvm_validate_implicit_alias "$1" 2> /dev/null; then
return 0
fi
case "$1" in
"$(nvm_iojs_prefix)" | \
"$(nvm_node_prefix)")
return 0
;;
*)
local VERSION
VERSION="$(nvm_strip_iojs_prefix "$1")"
nvm_version_greater "$VERSION"
;;
esac
}
# Convert "v1.22.3" into a comparable integer string: major as-is, then
# minor and patch zero-padded to six digits each (e.g. 1000022000003).
nvm_normalize_version() {
local numeric
numeric="$(echo "${1#v}" | command awk -F. '{ printf("%d%06d%06d\n", $1,$2,$3); }')"
echo "$numeric"
}
# Ensure the version string carries a leading "v", re-attaching the io.js
# prefix when the input had one.
nvm_ensure_version_prefix() {
local with_v
with_v="$(nvm_strip_iojs_prefix "$1" | command sed -e 's/^\([0-9]\)/v\1/g')"
if nvm_is_iojs_version "$1"; then
nvm_add_iojs_prefix "$with_v"
else
echo "$with_v"
fi
}
# Pad a version to exactly three dot groups ("v1.2" -> "v1.2.0") and drop
# any groups beyond the third; recurses until the count reaches 3.
nvm_format_version() {
local padded
padded="$(nvm_ensure_version_prefix "$1")"
local groups
groups="$(nvm_num_version_groups "$padded")"
if [ "$groups" -lt 3 ]; then
nvm_format_version "${padded%.}.0"
else
echo "$padded" | cut -f1-3 -d.
fi
}
# Count the dot-separated groups in a version string ("v1.2.3" -> 3).
# A leading "v" and one trailing "." are ignored; empty input yields 0.
nvm_num_version_groups() {
local ver
ver="$1"
ver="${ver#v}"
ver="${ver%.}"
if [ -z "$ver" ]; then
echo "0"
return
fi
# Keep only the dots, prepend one, and count: groups = dots + 1.
local dots
dots=$(echo "$ver" | command sed -e 's/[^\.]//g')
local group_string
group_string=".$dots"
echo "${#group_string}"
}
# Remove every nvm-managed directory whose suffix matches $2 from the
# colon-separated list $1 (handles both the legacy layout and the
# versions/ layout, at the start, middle, or end of the list).
nvm_strip_path() {
local stripped
stripped="$(echo "$1" | command sed \
-e "s#$NVM_DIR/[^/]*$2[^:]*:##g" \
-e "s#:$NVM_DIR/[^/]*$2[^:]*##g" \
-e "s#$NVM_DIR/[^/]*$2[^:]*##g" \
-e "s#$NVM_DIR/versions/[^/]*/[^/]*$2[^:]*:##g" \
-e "s#:$NVM_DIR/versions/[^/]*/[^/]*$2[^:]*##g" \
-e "s#$NVM_DIR/versions/[^/]*/[^/]*$2[^:]*##g")"
echo "$stripped"
}
# Prepend entry $2 to the colon-separated list $1, avoiding a dangling
# colon when the list is empty.
nvm_prepend_path() {
if [ -n "$1" ]; then
echo "$2:$1"
else
echo "$2"
fi
}
# True when prebuilt binaries exist for the given version: node began
# shipping binaries at 0.8.6.
nvm_binary_available() {
# binaries started with node 0.8.6
local FIRST_VERSION_WITH_BINARY
FIRST_VERSION_WITH_BINARY="0.8.6"
nvm_version_greater_than_or_equal_to "$(nvm_strip_iojs_prefix "$1")" "$FIRST_VERSION_WITH_BINARY"
}
# Print the version stored for alias $1 (the content of its alias file).
# Returns 1 when no alias name is given, 2 when the alias file is missing.
nvm_alias() {
local ALIAS
ALIAS="$1"
if [ -z "$ALIAS" ]; then
echo >&2 'An alias is required.'
return 1
fi
local NVM_ALIAS_PATH
NVM_ALIAS_PATH="$(nvm_alias_path)/$ALIAS"
if [ ! -f "$NVM_ALIAS_PATH" ]; then
echo >&2 'Alias does not exist.'
return 2
fi
command cat "$NVM_ALIAS_PATH"
}
nvm_ls_current() {
local NVM_LS_CURRENT_NODE_PATH
NVM_LS_CURRENT_NODE_PATH="$(command which node 2> /dev/null)"
if [ $? -ne 0 ]; then
echo 'none'
elif nvm_tree_contains_path "$(nvm_version_dir iojs)" "$NVM_LS_CURRENT_NODE_PATH"; then
nvm_add_iojs_prefix "$(iojs --version 2>/dev/null)"
elif nvm_tree_contains_path "$NVM_DIR" "$NVM_LS_CURRENT_NODE_PATH"; then
local VERSION
VERSION="$(node --version 2>/dev/null)"
if [ "$VERSION" = "v0.6.21-pre" ]; then
echo "v0.6.21"
else
echo "$VERSION"
fi
else
echo 'system'
fi
}
nvm_resolve_alias() {
if [ -z "$1" ]; then
return 1
fi
local PATTERN
PATTERN="$1"
local ALIAS
ALIAS="$PATTERN"
local ALIAS_TEMP
local SEEN_ALIASES
SEEN_ALIASES="$ALIAS"
while true; do
ALIAS_TEMP="$(nvm_alias "$ALIAS" 2> /dev/null)"
if [ -z "$ALIAS_TEMP" ]; then
break
fi
if [ -n "$ALIAS_TEMP" ] \
&& command printf "$SEEN_ALIASES" | command grep -e "^$ALIAS_TEMP$" > /dev/null; then
ALIAS="∞"
break
fi
SEEN_ALIASES="$SEEN_ALIASES\n$ALIAS_TEMP"
ALIAS="$ALIAS_TEMP"
done
if [ -n "$ALIAS" ] && [ "_$ALIAS" != "_$PATTERN" ]; then
local NVM_IOJS_PREFIX
NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
local NVM_NODE_PREFIX
NVM_NODE_PREFIX="$(nvm_node_prefix)"
case "_$ALIAS" in
"_∞" | \
"_$NVM_IOJS_PREFIX" | "_$NVM_IOJS_PREFIX-" | \
"_$NVM_NODE_PREFIX" )
echo "$ALIAS"
;;
*)
nvm_ensure_version_prefix "$ALIAS"
;;
esac
return 0
fi
if nvm_validate_implicit_alias "$PATTERN" 2> /dev/null ; then
local IMPLICIT
IMPLICIT="$(nvm_print_implicit_alias local "$PATTERN" 2> /dev/null)"
if [ -n "$IMPLICIT" ]; then
nvm_ensure_version_prefix "$IMPLICIT"
fi
fi
return 2
}
nvm_resolve_local_alias() {
if [ -z "$1" ]; then
return 1
fi
local VERSION
local EXIT_CODE
VERSION="$(nvm_resolve_alias "$1")"
EXIT_CODE=$?
if [ -z "$VERSION" ]; then
return $EXIT_CODE
fi
if [ "_$VERSION" != "_∞" ]; then
nvm_version "$VERSION"
else
echo "$VERSION"
fi
}
# Canonical distribution prefixes used throughout nvm.
nvm_iojs_prefix() {
printf '%s\n' 'iojs'
}
nvm_node_prefix() {
printf '%s\n' 'node'
}
# True when the version string carries the "iojs-" prefix.
nvm_is_iojs_version() {
case "$1" in
iojs-*)
return 0
;;
esac
return 1
}
# Print $1 as an "iojs-" prefixed, v-prefixed version string.
nvm_add_iojs_prefix() {
local bare
bare="$(nvm_ensure_version_prefix "$(nvm_strip_iojs_prefix "$1")")"
command echo "$(nvm_iojs_prefix)-$bare"
}
# Remove a leading "iojs-" from $1; a bare "iojs" becomes the empty string,
# anything without the prefix passes through unchanged.
nvm_strip_iojs_prefix() {
local prefix
prefix="$(nvm_iojs_prefix)"
case "$1" in
"$prefix")
echo
;;
*)
echo "${1#"$prefix"-}"
;;
esac
}
nvm_ls() {
local PATTERN
PATTERN="${1-}"
local VERSIONS
VERSIONS=''
if [ "$PATTERN" = 'current' ]; then
nvm_ls_current
return
fi
local NVM_IOJS_PREFIX
NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
local NVM_NODE_PREFIX
NVM_NODE_PREFIX="$(nvm_node_prefix)"
local NVM_VERSION_DIR_IOJS
NVM_VERSION_DIR_IOJS="$(nvm_version_dir "$NVM_IOJS_PREFIX")"
local NVM_VERSION_DIR_NEW
NVM_VERSION_DIR_NEW="$(nvm_version_dir new)"
local NVM_VERSION_DIR_OLD
NVM_VERSION_DIR_OLD="$(nvm_version_dir old)"
case "$PATTERN" in
"$NVM_IOJS_PREFIX" | "$NVM_NODE_PREFIX" )
PATTERN="$PATTERN-"
;;
*)
if nvm_resolve_local_alias "$PATTERN"; then
return
fi
PATTERN="$(nvm_ensure_version_prefix "$PATTERN")"
;;
esac
if [ "_$PATTERN" = "_N/A" ]; then
return
fi
# If it looks like an explicit version, don't do anything funny
local NVM_PATTERN_STARTS_WITH_V
case $PATTERN in
v*) NVM_PATTERN_STARTS_WITH_V=true ;;
*) NVM_PATTERN_STARTS_WITH_V=false ;;
esac
if [ $NVM_PATTERN_STARTS_WITH_V = true ] && [ "_$(nvm_num_version_groups "$PATTERN")" = "_3" ]; then
if [ -d "$(nvm_version_path "$PATTERN")" ]; then
VERSIONS="$PATTERN"
elif [ -d "$(nvm_version_path "$(nvm_add_iojs_prefix "$PATTERN")")" ]; then
VERSIONS="$(nvm_add_iojs_prefix "$PATTERN")"
fi
else
case "$PATTERN" in
"$NVM_IOJS_PREFIX-" | "$NVM_NODE_PREFIX-" | "system") ;;
*)
local NUM_VERSION_GROUPS
NUM_VERSION_GROUPS="$(nvm_num_version_groups "$PATTERN")"
if [ "_$NUM_VERSION_GROUPS" = "_2" ] || [ "_$NUM_VERSION_GROUPS" = "_1" ]; then
PATTERN="${PATTERN%.}."
fi
;;
esac
local ZHS_HAS_SHWORDSPLIT_UNSET
ZHS_HAS_SHWORDSPLIT_UNSET=1
if nvm_has "setopt"; then
ZHS_HAS_SHWORDSPLIT_UNSET=$(setopt | command grep shwordsplit > /dev/null ; echo $?)
setopt shwordsplit
fi
local NVM_DIRS_TO_SEARCH1
NVM_DIRS_TO_SEARCH1=''
local NVM_DIRS_TO_SEARCH2
NVM_DIRS_TO_SEARCH2=''
local NVM_DIRS_TO_SEARCH3
NVM_DIRS_TO_SEARCH3=''
local NVM_ADD_SYSTEM
NVM_ADD_SYSTEM=false
if nvm_is_iojs_version "$PATTERN"; then
NVM_DIRS_TO_SEARCH1="$NVM_VERSION_DIR_IOJS"
PATTERN="$(nvm_strip_iojs_prefix "$PATTERN")"
if nvm_has_system_iojs; then
NVM_ADD_SYSTEM=true
fi
elif [ "_$PATTERN" = "_$NVM_NODE_PREFIX-" ]; then
NVM_DIRS_TO_SEARCH1="$NVM_VERSION_DIR_OLD"
NVM_DIRS_TO_SEARCH2="$NVM_VERSION_DIR_NEW"
PATTERN=''
if nvm_has_system_node; then
NVM_ADD_SYSTEM=true
fi
else
NVM_DIRS_TO_SEARCH1="$NVM_VERSION_DIR_OLD"
NVM_DIRS_TO_SEARCH2="$NVM_VERSION_DIR_NEW"
NVM_DIRS_TO_SEARCH3="$NVM_VERSION_DIR_IOJS"
if nvm_has_system_iojs || nvm_has_system_node; then
NVM_ADD_SYSTEM=true
fi
fi
if ! [ -d "$NVM_DIRS_TO_SEARCH1" ]; then
NVM_DIRS_TO_SEARCH1=''
fi
if ! [ -d "$NVM_DIRS_TO_SEARCH2" ]; then
NVM_DIRS_TO_SEARCH2="$NVM_DIRS_TO_SEARCH1"
fi
if ! [ -d "$NVM_DIRS_TO_SEARCH3" ]; then
NVM_DIRS_TO_SEARCH3="$NVM_DIRS_TO_SEARCH2"
fi
if [ -z "$PATTERN" ]; then
PATTERN='v'
fi
if [ -n "$NVM_DIRS_TO_SEARCH1$NVM_DIRS_TO_SEARCH2$NVM_DIRS_TO_SEARCH3" ]; then
VERSIONS="$(command find "$NVM_DIRS_TO_SEARCH1" "$NVM_DIRS_TO_SEARCH2" "$NVM_DIRS_TO_SEARCH3" -maxdepth 1 -type d -name "$PATTERN*" \
| command sed "
s#$NVM_VERSION_DIR_IOJS/#$NVM_IOJS_PREFIX-#;
\#$NVM_VERSION_DIR_IOJS# d;
s#^$NVM_DIR/##;
\#^versions\$# d;
s#^versions/##;
s#^v#$NVM_NODE_PREFIX-v#;
s#^\($NVM_IOJS_PREFIX\)[-/]v#\1.v#;
s#^\($NVM_NODE_PREFIX\)[-/]v#\1.v#" \
| command sort -t. -u -k 2.2,2n -k 3,3n -k 4,4n \
| command sed "
s/^\($NVM_IOJS_PREFIX\)\./\1-/;
s/^$NVM_NODE_PREFIX\.//" \
)"
fi
if [ "$ZHS_HAS_SHWORDSPLIT_UNSET" -eq 1 ] && nvm_has "unsetopt"; then
unsetopt shwordsplit
fi
fi
if [ "${NVM_ADD_SYSTEM-}" = true ]; then
if [ -z "$PATTERN" ] || [ "_$PATTERN" = "_v" ]; then
VERSIONS="$VERSIONS$(command printf '\n%s' 'system')"
elif [ "$PATTERN" = 'system' ]; then
VERSIONS="$(command printf '%s' 'system')"
fi
fi
if [ -z "$VERSIONS" ]; then
echo "N/A"
return 3
fi
echo "$VERSIONS"
}
nvm_ls_remote() {
local PATTERN
PATTERN="$1"
if nvm_validate_implicit_alias "$PATTERN" 2> /dev/null ; then
PATTERN="$(nvm_ls_remote "$(nvm_print_implicit_alias remote "$PATTERN")" | command tail -n1)"
elif [ -n "$PATTERN" ]; then
PATTERN="$(nvm_ensure_version_prefix "$PATTERN")"
else
PATTERN=".*"
fi
nvm_ls_remote_index_tab node std "$NVM_NODEJS_ORG_MIRROR" "$PATTERN"
}
nvm_ls_remote_iojs() {
nvm_ls_remote_index_tab iojs std "$NVM_IOJS_ORG_MIRROR" "$1"
}
nvm_ls_remote_index_tab() {
if [ "$#" -lt 4 ]; then
echo "not enough arguments" >&2
return 5
fi
local TYPE
TYPE="$1"
local PREFIX
PREFIX=''
case "$TYPE-$2" in
iojs-std) PREFIX="$(nvm_iojs_prefix)-" ;;
node-std) PREFIX='' ;;
iojs-*)
echo "unknown type of io.js release" >&2
return 4
;;
node-*)
echo "unknown type of node.js release" >&2
return 4
;;
esac
local SORT_COMMAND
SORT_COMMAND='sort'
case "$TYPE" in
node) SORT_COMMAND='sort -t. -u -k 1.2,1n -k 2,2n -k 3,3n' ;;
esac
local MIRROR
MIRROR="$3"
local PATTERN
PATTERN="$4"
local VERSIONS
if [ -n "$PATTERN" ]; then
if [ "_$TYPE" = "_iojs" ]; then
PATTERN="$(nvm_ensure_version_prefix "$(nvm_strip_iojs_prefix "$PATTERN")")"
else
PATTERN="$(nvm_ensure_version_prefix "$PATTERN")"
fi
else
PATTERN=".*"
fi
ZHS_HAS_SHWORDSPLIT_UNSET=1
if nvm_has "setopt"; then
ZHS_HAS_SHWORDSPLIT_UNSET=$(setopt | command grep shwordsplit > /dev/null ; echo $?)
setopt shwordsplit
fi
VERSIONS="$(nvm_download -L -s "$MIRROR/index.tab" -o - \
| command sed "
1d;
s/^/$PREFIX/;
s/[[:blank:]].*//" \
| command grep -w "$PATTERN" \
| $SORT_COMMAND)"
if [ "$ZHS_HAS_SHWORDSPLIT_UNSET" -eq 1 ] && nvm_has "unsetopt"; then
unsetopt shwordsplit
fi
if [ -z "$VERSIONS" ]; then
echo "N/A"
return 3
fi
echo "$VERSIONS"
}
nvm_checksum() {
local NVM_CHECKSUM
if [ -z "$3" ] || [ "$3" == "sha1" ]; then
if nvm_has "sha1sum" && ! nvm_is_alias "sha1sum"; then
NVM_CHECKSUM="$(command sha1sum "$1" | command awk '{print $1}')"
elif nvm_has "sha1" && ! nvm_is_alias "sha1"; then
NVM_CHECKSUM="$(command sha1 -q "$1")"
elif nvm_has "shasum" && ! nvm_is_alias "shasum"; then
NVM_CHECKSUM="$(shasum "$1" | command awk '{print $1}')"
else
echo "Unaliased sha1sum, sha1, or shasum not found." >&2
return 2
fi
else
if nvm_has "sha256sum" && ! nvm_is_alias "sha256sum"; then
NVM_CHECKSUM="$(sha256sum "$1" | awk '{print $1}')"
elif nvm_has "shasum" && ! nvm_is_alias "shasum"; then
NVM_CHECKSUM="$(shasum -a 256 "$1" | awk '{print $1}')"
elif nvm_has "sha256" && ! nvm_is_alias "sha256"; then
NVM_CHECKSUM="$(sha256 -q "$1" | awk '{print $1}')"
elif nvm_has "gsha256sum" && ! nvm_is_alias "gsha256sum"; then
NVM_CHECKSUM="$(gsha256sum "$1" | awk '{print $1}')"
elif nvm_has "openssl" && ! nvm_is_alias "openssl"; then
NVM_CHECKSUM="$(openssl dgst -sha256 "$1" | rev | awk '{print $1}' | rev)"
elif nvm_has "libressl" && ! nvm_is_alias "libressl"; then
NVM_CHECKSUM="$(libressl dgst -sha256 "$1" | rev | awk '{print $1}' | rev)"
elif nvm_has "bssl" && ! nvm_is_alias "bssl"; then
NVM_CHECKSUM="$(bssl sha256sum "$1" | awk '{print $1}')"
else
echo "Unaliased sha256sum, shasum, sha256, gsha256sum, openssl, libressl, or bssl not found." >&2
echo "WARNING: Continuing *without checksum verification*" >&2
return
fi
fi
if [ "_$NVM_CHECKSUM" = "_$2" ]; then
return
elif [ -z "$2" ]; then
echo 'Checksums empty' #missing in raspberry pi binary
return
else
echo 'Checksums do not match.' >&2
return 1
fi
}
nvm_print_versions() {
local VERSION
local FORMAT
local NVM_CURRENT
NVM_CURRENT=$(nvm_ls_current)
echo "$1" | while read -r VERSION; do
if [ "_$VERSION" = "_$NVM_CURRENT" ]; then
FORMAT='\033[0;32m-> %12s\033[0m'
elif [ "$VERSION" = "system" ]; then
FORMAT='\033[0;33m%15s\033[0m'
elif [ -d "$(nvm_version_path "$VERSION" 2> /dev/null)" ]; then
FORMAT='\033[0;34m%15s\033[0m'
else
FORMAT='%15s'
fi
command printf "$FORMAT\n" "$VERSION"
done
}
# Accept only the implicit alias names: stable, unstable, iojs, node.
# Unknown names print a diagnostic and return 1.
nvm_validate_implicit_alias() {
local iojs
iojs="$(nvm_iojs_prefix)"
local node
node="$(nvm_node_prefix)"
case "$1" in
"stable" | "unstable" | "$iojs" | "$node")
return
;;
*)
echo "Only implicit aliases 'stable', 'unstable', '$iojs', and '$node' are supported." >&2
return 1
;;
esac
}
nvm_print_implicit_alias() {
if [ "_$1" != "_local" ] && [ "_$1" != "_remote" ]; then
echo "nvm_print_implicit_alias must be specified with local or remote as the first argument." >&2
return 1
fi
local NVM_IMPLICIT
NVM_IMPLICIT="$2"
if ! nvm_validate_implicit_alias "$NVM_IMPLICIT"; then
return 2
fi
local ZHS_HAS_SHWORDSPLIT_UNSET
local NVM_IOJS_PREFIX
NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
local NVM_NODE_PREFIX
NVM_NODE_PREFIX="$(nvm_node_prefix)"
local NVM_COMMAND
local NVM_ADD_PREFIX_COMMAND
local LAST_TWO
case "$NVM_IMPLICIT" in
"$NVM_IOJS_PREFIX")
NVM_COMMAND="nvm_ls_remote_iojs"
NVM_ADD_PREFIX_COMMAND="nvm_add_iojs_prefix"
if [ "_$1" = "_local" ]; then
NVM_COMMAND="nvm_ls $NVM_IMPLICIT"
fi
ZHS_HAS_SHWORDSPLIT_UNSET=1
if nvm_has "setopt"; then
ZHS_HAS_SHWORDSPLIT_UNSET=$(setopt | command grep shwordsplit > /dev/null ; echo $?)
setopt shwordsplit
fi
local NVM_IOJS_VERSION
local EXIT_CODE
NVM_IOJS_VERSION="$($NVM_COMMAND)"
EXIT_CODE="$?"
if [ "_$EXIT_CODE" = "_0" ]; then
NVM_IOJS_VERSION="$(echo "$NVM_IOJS_VERSION" | sed "s/^$NVM_IMPLICIT-//" | command grep -e '^v' | command cut -c2- | command cut -d . -f 1,2 | uniq | command tail -1)"
fi
if [ "$ZHS_HAS_SHWORDSPLIT_UNSET" -eq 1 ] && nvm_has "unsetopt"; then
unsetopt shwordsplit
fi
if [ "_$NVM_IOJS_VERSION" = "_N/A" ]; then
echo "N/A"
else
$NVM_ADD_PREFIX_COMMAND "$NVM_IOJS_VERSION"
fi
return $EXIT_CODE
;;
"$NVM_NODE_PREFIX")
echo "stable"
return
;;
*)
NVM_COMMAND="nvm_ls_remote"
if [ "_$1" = "_local" ]; then
NVM_COMMAND="nvm_ls node"
fi
ZHS_HAS_SHWORDSPLIT_UNSET=1
if nvm_has "setopt"; then
ZHS_HAS_SHWORDSPLIT_UNSET=$(setopt | command grep shwordsplit > /dev/null ; echo $?)
setopt shwordsplit
fi
LAST_TWO=$($NVM_COMMAND | command grep -e '^v' | command cut -c2- | command cut -d . -f 1,2 | uniq)
if [ "$ZHS_HAS_SHWORDSPLIT_UNSET" -eq 1 ] && nvm_has "unsetopt"; then
unsetopt shwordsplit
fi
;;
esac
local MINOR
local STABLE
local UNSTABLE
local MOD
local NORMALIZED_VERSION
ZHS_HAS_SHWORDSPLIT_UNSET=1
if nvm_has "setopt"; then
ZHS_HAS_SHWORDSPLIT_UNSET=$(setopt | command grep shwordsplit > /dev/null ; echo $?)
setopt shwordsplit
fi
for MINOR in $LAST_TWO; do
NORMALIZED_VERSION="$(nvm_normalize_version "$MINOR")"
if [ "_0${NORMALIZED_VERSION#?}" != "_$NORMALIZED_VERSION" ]; then
STABLE="$MINOR"
else
MOD=$(expr "$NORMALIZED_VERSION" \/ 1000000 \% 2)
if [ "$MOD" -eq 0 ]; then
STABLE="$MINOR"
elif [ "$MOD" -eq 1 ]; then
UNSTABLE="$MINOR"
fi
fi
done
if [ "$ZHS_HAS_SHWORDSPLIT_UNSET" -eq 1 ] && nvm_has "unsetopt"; then
unsetopt shwordsplit
fi
if [ "_$2" = '_stable' ]; then
echo "${STABLE}"
elif [ "_$2" = '_unstable' ]; then
echo "${UNSTABLE}"
fi
}
# Classify the host OS (via `uname -a`) into nvm's download identifiers:
# linux, darwin, sunos, or freebsd; prints an empty string otherwise.
nvm_get_os() {
local kernel_string
kernel_string="$(uname -a)"
local os
case "$kernel_string" in
Linux\ *) os=linux ;;
Darwin\ *) os=darwin ;;
SunOS\ *) os=sunos ;;
FreeBSD\ *) os=freebsd ;;
esac
echo "$os"
}
# Map the host machine architecture onto node's download arch names
# (x64 for x86_64/amd64, x86 for i*86, otherwise the raw uname value).
nvm_get_arch() {
local HOST_ARCH
local NVM_OS
local EXIT_CODE
NVM_OS="$(nvm_get_os)"
# If the OS is SunOS, first try to use pkgsrc to guess
# the most appropriate arch. If it's not available, use
# isainfo to get the instruction set supported by the
# kernel.
if [ "_$NVM_OS" = "_sunos" ]; then
HOST_ARCH=$(pkg_info -Q MACHINE_ARCH pkg_install)
EXIT_CODE=$?
if [ $EXIT_CODE -ne 0 ]; then
HOST_ARCH=$(isainfo -n)
fi
else
HOST_ARCH="$(uname -m)"
fi
local NVM_ARCH
case "$HOST_ARCH" in
x86_64 | amd64) NVM_ARCH="x64" ;;
i*86) NVM_ARCH="x86" ;;
*) NVM_ARCH="$HOST_ARCH" ;;
esac
echo "$NVM_ARCH"
}
# Print the "major.minor" part of a version string after validating its
# format. Returns 1 when the input is empty, 2 when it is malformed, and
# 3 when the formatted version unexpectedly lacks a v-prefix.
nvm_get_minor_version() {
local version
version="$1"
if [ -z "$version" ]; then
echo 'a version is required' >&2
return 1
fi
# Reject anything that is not digits and dots with an optional leading v.
case "$version" in
v | .* | *..* | v*[!.0123456789]* | [!v]*[!.0123456789]* | [!v0123456789]* | v[!0123456789]*)
echo 'invalid version number' >&2
return 2
;;
esac
local formatted
formatted="$(nvm_format_version "$version")"
local minor
minor="$(echo "$formatted" | command grep -e '^v' | command cut -c2- | command cut -d . -f 1,2)"
if [ -z "$minor" ]; then
echo 'invalid version number! (please report this)' >&2
return 3
fi
echo "$minor"
}
nvm_ensure_default_set() {
local VERSION
VERSION="$1"
if [ -z "$VERSION" ]; then
echo 'nvm_ensure_default_set: a version is required' >&2
return 1
fi
if nvm_alias default >/dev/null 2>&1; then
# default already set
return 0
fi
local OUTPUT
OUTPUT="$(nvm alias default "$VERSION")"
local EXIT_CODE
EXIT_CODE="$?"
echo "Creating default alias: $OUTPUT"
return $EXIT_CODE
}
# node and io.js merged at v4.0.0; true for versions in the merged line.
nvm_is_merged_node_version() {
nvm_version_greater_than_or_equal_to "$1" v4.0.0
}
nvm_install_merged_node_binary() {
  # Download and install a prebuilt binary of a post-v4.0 ("merged") node.
  # $1 - release type; only "std" is supported
  # $2 - version to install (e.g. v4.2.1)
  # $3 - version to reinstall global packages from (accepted for interface
  #      parity with the other installers; not used in this function)
  # Returns 0 on success; 1 on download/checksum/extract failure; 2 when the
  # OS cannot be determined; 4 for an unknown release type; 10 for an
  # unsupported version.
  local NVM_NODE_TYPE
  NVM_NODE_TYPE="$1"
  local MIRROR
  if [ "_$NVM_NODE_TYPE" = "_std" ]; then
    MIRROR="$NVM_NODEJS_ORG_MIRROR"
  else
    echo "unknown type of node.js release" >&2
    return 4
  fi
  local VERSION
  VERSION="$2"
  local REINSTALL_PACKAGES_FROM
  REINSTALL_PACKAGES_FROM="$3"
  if ! nvm_is_merged_node_version "$VERSION" || nvm_is_iojs_version "$VERSION"; then
    echo 'nvm_install_merged_node_binary requires a node version v4.0 or greater.' >&2
    return 10
  fi
  local VERSION_PATH
  VERSION_PATH="$(nvm_version_path "$VERSION")"
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  local t
  local url
  local sum
  local NODE_PREFIX
  local compression
  compression="gz"
  local tar_compression_flag
  tar_compression_flag="x"
  # Prefer the smaller xz tarball when the version ships one and tar supports it.
  if nvm_supports_xz "$VERSION"; then
    compression="xz"
    tar_compression_flag="J"
  fi
  NODE_PREFIX="$(nvm_node_prefix)"
  if [ -n "$NVM_OS" ]; then
    t="$VERSION-$NVM_OS-$(nvm_get_arch)"
    url="$MIRROR/$VERSION/$NODE_PREFIX-${t}.tar.${compression}"
    # Expected SHA-256 checksum from the published release manifest.
    sum="$(nvm_download -L -s "$MIRROR/$VERSION/SHASUMS256.txt" -o - | command grep "${NODE_PREFIX}-${t}.tar.${compression}" | command awk '{print $1}')"
    local tmpdir
    tmpdir="$NVM_DIR/bin/node-${t}"
    local tmptarball
    tmptarball="$tmpdir/node-${t}.tar.${compression}"
    local NVM_INSTALL_ERRORED
    command mkdir -p "$tmpdir" && \
      echo "Downloading $url..." && \
      nvm_download -L -C - --progress-bar "$url" -o "$tmptarball" || \
      NVM_INSTALL_ERRORED=true
    # Some mirrors serve an HTML 404 page with HTTP 200; detect it in the payload.
    # (use `command grep` for consistency with the rest of the file)
    if command grep '404 Not Found' "$tmptarball" >/dev/null; then
      NVM_INSTALL_ERRORED=true
      echo >&2 "HTTP 404 at URL $url";
    fi
    if (
      [ "$NVM_INSTALL_ERRORED" != true ] && \
      nvm_checksum "$tmptarball" "$sum" "sha256" && \
      command tar -x${tar_compression_flag}f "$tmptarball" -C "$tmpdir" --strip-components 1 && \
      command rm -f "$tmptarball" && \
      command mkdir -p "$VERSION_PATH" && \
      command mv "$tmpdir"/* "$VERSION_PATH"
    ); then
      return 0
    else
      # (previously had a duplicated `>&2` redirect on this line)
      echo >&2 "Binary download failed, trying source."
      command rm -rf "$tmptarball" "$tmpdir"
      return 1
    fi
  fi
  return 2
}
nvm_install_iojs_binary() {
  # Download and install a prebuilt io.js binary.
  # $1 - release type; only "std" is supported
  # $2 - iojs-prefixed version to install (e.g. iojs-v3.3.1)
  # $3 - version to reinstall global packages from (accepted for interface
  #      parity with the other installers; not used in this function)
  # Returns 0 on success; 1 on download/checksum/extract failure; 2 when the
  # OS cannot be determined or no binary is available; 4 for an unknown
  # release type; 10 for a non-iojs version.
  local NVM_IOJS_TYPE
  NVM_IOJS_TYPE="$1"
  local MIRROR
  if [ "_$NVM_IOJS_TYPE" = "_std" ]; then
    MIRROR="$NVM_IOJS_ORG_MIRROR"
  else
    echo "unknown type of io.js release" >&2
    return 4
  fi
  local PREFIXED_VERSION
  PREFIXED_VERSION="$2"
  local REINSTALL_PACKAGES_FROM
  REINSTALL_PACKAGES_FROM="$3"
  if ! nvm_is_iojs_version "$PREFIXED_VERSION"; then
    echo 'nvm_install_iojs_binary requires an iojs-prefixed version.' >&2
    return 10
  fi
  local VERSION
  VERSION="$(nvm_strip_iojs_prefix "$PREFIXED_VERSION")"
  local VERSION_PATH
  VERSION_PATH="$(nvm_version_path "$PREFIXED_VERSION")"
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  local t
  local url
  local sum
  local compression
  compression="gz"
  local tar_compression_flag
  tar_compression_flag="x"
  # Prefer the smaller xz tarball when the version ships one and tar supports it.
  if nvm_supports_xz "$VERSION"; then
    compression="xz"
    tar_compression_flag="J"
  fi
  if [ -n "$NVM_OS" ]; then
    if nvm_binary_available "$VERSION"; then
      t="$VERSION-$NVM_OS-$(nvm_get_arch)"
      url="$MIRROR/$VERSION/$(nvm_iojs_prefix)-${t}.tar.${compression}"
      # Expected SHA-256 checksum from the published release manifest.
      sum="$(nvm_download -L -s "$MIRROR/$VERSION/SHASUMS256.txt" -o - | command grep "$(nvm_iojs_prefix)-${t}.tar.${compression}" | command awk '{print $1}')"
      local tmpdir
      tmpdir="$NVM_DIR/bin/iojs-${t}"
      local tmptarball
      tmptarball="$tmpdir/iojs-${t}.tar.${compression}"
      local NVM_INSTALL_ERRORED
      command mkdir -p "$tmpdir" && \
        echo "Downloading $url..." && \
        nvm_download -L -C - --progress-bar "$url" -o "$tmptarball" || \
        NVM_INSTALL_ERRORED=true
      # Some mirrors serve an HTML 404 page with HTTP 200; detect it in the
      # payload. (use `command grep` for consistency with the rest of the file)
      if command grep '404 Not Found' "$tmptarball" >/dev/null; then
        NVM_INSTALL_ERRORED=true
        echo >&2 "HTTP 404 at URL $url";
      fi
      if (
        [ "$NVM_INSTALL_ERRORED" != true ] && \
        nvm_checksum "$tmptarball" "$sum" "sha256" && \
        command tar -x${tar_compression_flag}f "$tmptarball" -C "$tmpdir" --strip-components 1 && \
        command rm -f "$tmptarball" && \
        command mkdir -p "$VERSION_PATH" && \
        command mv "$tmpdir"/* "$VERSION_PATH"
      ); then
        return 0
      else
        # (previously had a duplicated `>&2` redirect on this line)
        echo >&2 "Binary download failed, trying source."
        command rm -rf "$tmptarball" "$tmpdir"
        return 1
      fi
    fi
  fi
  return 2
}
nvm_install_node_binary() {
  # Download and install a prebuilt binary of an unmerged (< v1.0) node.
  # $1 - version to install
  # $2 - version to reinstall global packages from (accepted for interface
  #      parity with the other installers; not used in this function)
  # Returns 0 on success; 1 on download/checksum/extract failure; 2 when the
  # OS cannot be determined or no binary is available; 10 for an
  # iojs-prefixed version.
  local VERSION
  VERSION="$1"
  local REINSTALL_PACKAGES_FROM
  REINSTALL_PACKAGES_FROM="$2"
  if nvm_is_iojs_version "$VERSION"; then
    echo 'nvm_install_node_binary does not allow an iojs-prefixed version.' >&2
    return 10
  fi
  local VERSION_PATH
  VERSION_PATH="$(nvm_version_path "$VERSION")"
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  local t
  local url
  local sum
  if [ -n "$NVM_OS" ]; then
    if nvm_binary_available "$VERSION"; then
      local NVM_ARCH
      NVM_ARCH="$(nvm_get_arch)"
      # Both ARM variants use the "arm-pi" tarballs. Both comparisons carry
      # the "_" guard prefix; the original wrote 'armv7l' (no underscore),
      # which made the armv7l branch unreachable.
      if [ "_$NVM_ARCH" = '_armv6l' ] || [ "_$NVM_ARCH" = '_armv7l' ]; then
        NVM_ARCH="arm-pi"
      fi
      t="$VERSION-$NVM_OS-$NVM_ARCH"
      url="$NVM_NODEJS_ORG_MIRROR/$VERSION/node-${t}.tar.gz"
      # Expected SHA-1 checksum from the release manifest (pre-v4 releases).
      sum=$(nvm_download -L -s "$NVM_NODEJS_ORG_MIRROR/$VERSION/SHASUMS.txt" -o - | command grep "node-${t}.tar.gz" | command awk '{print $1}')
      local tmpdir
      tmpdir="$NVM_DIR/bin/node-${t}"
      local tmptarball
      tmptarball="$tmpdir/node-${t}.tar.gz"
      local NVM_INSTALL_ERRORED
      command mkdir -p "$tmpdir" && \
        nvm_download -L -C - --progress-bar "$url" -o "$tmptarball" || \
        NVM_INSTALL_ERRORED=true
      # Some mirrors serve an HTML 404 page with HTTP 200; detect it in the
      # payload. (use `command grep` for consistency with the rest of the file)
      if command grep '404 Not Found' "$tmptarball" >/dev/null; then
        NVM_INSTALL_ERRORED=true
        echo >&2 "HTTP 404 at URL $url";
      fi
      if (
        [ "$NVM_INSTALL_ERRORED" != true ] && \
        nvm_checksum "$tmptarball" "$sum" && \
        command tar -xzf "$tmptarball" -C "$tmpdir" --strip-components 1 && \
        command rm -f "$tmptarball" && \
        command mkdir -p "$VERSION_PATH" && \
        command mv "$tmpdir"/* "$VERSION_PATH"
      ); then
        return 0
      else
        echo >&2 "Binary download failed, trying source."
        command rm -rf "$tmptarball" "$tmpdir"
        return 1
      fi
    fi
  fi
  return 2
}
nvm_get_make_jobs() {
  # Determine how many parallel `make` jobs to use and store the result in
  # the global NVM_MAKE_JOBS.
  # $1 - optional explicit job count; must be a natural number if given.
  # When no valid count is given, detects the CPU thread count per platform
  # and uses (threads - 1), falling back to 1.
  if nvm_is_natural_num "${1-}"; then
    NVM_MAKE_JOBS="$1"
    echo "number of \`make\` jobs: $NVM_MAKE_JOBS"
    return
  elif [ -n "${1-}" ]; then
    unset NVM_MAKE_JOBS
    echo >&2 "$1 is invalid for number of \`make\` jobs, must be a natural number"
  fi
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  local NVM_CPU_THREADS
  # Detect the logical CPU count per platform.
  if [ "_$NVM_OS" = "_linux" ]; then
    NVM_CPU_THREADS="$(command grep -c 'core id' /proc/cpuinfo)"
  elif [ "_$NVM_OS" = "_freebsd" ] || [ "_$NVM_OS" = "_darwin" ]; then
    NVM_CPU_THREADS="$(sysctl -n hw.ncpu)"
  elif [ "_$NVM_OS" = "_sunos" ]; then
    NVM_CPU_THREADS="$(psrinfo | wc -l)"
  fi
  if ! nvm_is_natural_num "$NVM_CPU_THREADS" ; then
    echo "Can not determine how many thread(s) we can use, set to only 1 now." >&2
    echo "Please report an issue on GitHub to help us make it better and run it faster on your computer!" >&2
    NVM_MAKE_JOBS=1
  else
    echo "Detected that you have $NVM_CPU_THREADS CPU thread(s)"
    # Quote the operand: an unquoted empty value would make `[` error out.
    if [ "$NVM_CPU_THREADS" -gt 2 ]; then
      NVM_MAKE_JOBS=$((NVM_CPU_THREADS - 1))
      echo "Set the number of jobs to $NVM_CPU_THREADS - 1 = $NVM_MAKE_JOBS jobs to speed up the build"
    else
      NVM_MAKE_JOBS=1
      echo "Number of CPU thread(s) less or equal to 2 will have only one job a time for 'make'"
    fi
  fi
}
nvm_install_node_source() {
  # Download the node source tarball, build it, and install into the
  # version path.
  # $1 - version to build (e.g. v0.10.32)
  # $2 - number of parallel `make` jobs
  # $3 - extra ./configure parameters (space-separated string)
  # Returns 0 on success; 1 when the download/build/install pipeline fails.
  local VERSION
  VERSION="$1"
  local NVM_MAKE_JOBS
  NVM_MAKE_JOBS="$2"
  local ADDITIONAL_PARAMETERS
  ADDITIONAL_PARAMETERS="$3"
  local NVM_ARCH
  NVM_ARCH="$(nvm_get_arch)"
  # On ARM, build without V8 snapshots (presumably to avoid known ARM
  # snapshot build issues — NOTE(review): rationale not visible here).
  if [ "_$NVM_ARCH" = '_armv6l' ] || [ "_$NVM_ARCH" = '_armv7l' ]; then
    ADDITIONAL_PARAMETERS="--without-snapshot $ADDITIONAL_PARAMETERS"
  fi
  if [ -n "$ADDITIONAL_PARAMETERS" ]; then
    echo "Additional options while compiling: $ADDITIONAL_PARAMETERS"
  fi
  local VERSION_PATH
  VERSION_PATH="$(nvm_version_path "$VERSION")"
  local NVM_OS
  NVM_OS="$(nvm_get_os)"
  local tarball
  tarball=''
  local sum
  sum=''
  local make
  make='make'
  # FreeBSD's base `make` is not GNU make; use gmake and the C++ compiler.
  if [ "_$NVM_OS" = "_freebsd" ]; then
    make='gmake'
    MAKE_CXX="CXX=c++"
  fi
  local tmpdir
  tmpdir="$NVM_DIR/src"
  local tmptarball
  tmptarball="$tmpdir/node-$VERSION.tar.gz"
  # Probe (HEAD request) the versioned mirror path first, then the flat
  # legacy path. The checksum is only fetched for the versioned layout.
  if [ "$(nvm_download -L -s -I "$NVM_NODEJS_ORG_MIRROR/$VERSION/node-$VERSION.tar.gz" -o - 2>&1 | command grep '200 OK')" != '' ]; then
    tarball="$NVM_NODEJS_ORG_MIRROR/$VERSION/node-$VERSION.tar.gz"
    sum=$(nvm_download -L -s "$NVM_NODEJS_ORG_MIRROR/$VERSION/SHASUMS.txt" -o - | command grep "node-${VERSION}.tar.gz" | command awk '{print $1}')
  elif [ "$(nvm_download -L -s -I "$NVM_NODEJS_ORG_MIRROR/node-$VERSION.tar.gz" -o - | command grep '200 OK')" != '' ]; then
    tarball="$NVM_NODEJS_ORG_MIRROR/node-$VERSION.tar.gz"
  fi
  # Entire download -> verify -> configure -> make -> install pipeline runs
  # in a subshell so the `cd` does not leak into the caller's shell.
  if (
    [ -n "$tarball" ] && \
    command mkdir -p "$tmpdir" && \
    echo "Downloading $tarball..." && \
    nvm_download -L --progress-bar "$tarball" -o "$tmptarball" && \
    nvm_checksum "$tmptarball" "$sum" && \
    command tar -xzf "$tmptarball" -C "$tmpdir" && \
    cd "$tmpdir/node-$VERSION" && \
    ./configure --prefix="$VERSION_PATH" $ADDITIONAL_PARAMETERS && \
    $make -j $NVM_MAKE_JOBS ${MAKE_CXX-} && \
    command rm -f "$VERSION_PATH" 2>/dev/null && \
    $make -j $NVM_MAKE_JOBS ${MAKE_CXX-} install
  )
  then
    # Very old node releases did not bundle npm; bootstrap it when missing.
    if ! nvm_has "npm" ; then
      echo "Installing npm..."
      if nvm_version_greater 0.2.0 "$VERSION"; then
        # node < 0.2.0 cannot run any npm
        echo "npm requires node v0.2.3 or higher" >&2
      elif nvm_version_greater_than_or_equal_to "$VERSION" 0.2.0; then
        if nvm_version_greater 0.2.3 "$VERSION"; then
          # node 0.2.0-0.2.2 cannot run npm either
          echo "npm requires node v0.2.3 or higher" >&2
        else
          # node 0.2.3+ on the 0.2 line needs the pinned npm 0.2.19
          nvm_download -L https://npmjs.org/install.sh -o - | clean=yes npm_install=0.2.19 sh
        fi
      else
        nvm_download -L https://npmjs.org/install.sh -o - | clean=yes sh
      fi
    fi
  else
    echo "nvm: install $VERSION failed!" >&2
    return 1
  fi
  # NOTE(review): returns the status of the last command in the success
  # branch (the npm bootstrap, or the `nvm_has` check when npm is present).
  return $?
}
nvm_match_version() {
  # Resolve a user-supplied version-ish string to a concrete version,
  # treating "system" and the io.js aliases specially.
  local IOJS_PREFIX
  IOJS_PREFIX="$(nvm_iojs_prefix)"
  local REQUESTED
  REQUESTED="$1"
  if [ "_$REQUESTED" = "_system" ]; then
    echo "system"
  elif [ "_$REQUESTED" = "_$IOJS_PREFIX" ] || [ "_$REQUESTED" = "_io.js" ]; then
    nvm_version "$IOJS_PREFIX"
  else
    nvm_version "$REQUESTED"
  fi
}
nvm_npm_global_modules() {
  # Print the global npm packages of a version as "<installs> //// <links>",
  # where <installs> is a space-separated list of name@version specs and
  # <links> is the list of `npm link` target paths.
  # $1 - version to inspect, or "system"
  local NPMLIST
  local VERSION
  VERSION="$1"
  # `tail -n +2` drops the header line of `npm list` output.
  if [ "_$VERSION" = "_system" ]; then
    NPMLIST=$(nvm use system > /dev/null && npm list -g --depth=0 2> /dev/null | command tail -n +2)
  else
    NPMLIST=$(nvm use "$VERSION" > /dev/null && npm list -g --depth=0 2> /dev/null | command tail -n +2)
  fi
  local INSTALLS
  # Drop linked (" -> ") and "(empty)" entries, reduce each line to its
  # name@version spec, skip npm itself, and join everything onto one line.
  INSTALLS=$(echo "$NPMLIST" | command sed -e '/ -> / d' -e '/\(empty\)/ d' -e 's/^.* \(.*@[^ ]*\).*/\1/' -e '/^npm@[^ ]*.*$/ d' | command xargs)
  local LINKS
  # Keep only the link targets of " -> " entries.
  LINKS="$(echo "$NPMLIST" | command sed -n 's/.* -> \(.*\)/\1/ p')"
  echo "$INSTALLS //// $LINKS"
}
nvm_die_on_prefix() {
  # Abort (with an explanatory message) when a "prefix" setting would make
  # npm install global packages outside of $NVM_DIR.
  # $1 - 1 to silently delete an offending npm "prefix" config, 0 to die
  # $2 - the nvm command to suggest to the user (must be nonempty)
  # Returns 0 when no offending prefix exists (or it was deleted); 1/2 for
  # bad arguments; 3 for $PREFIX, 4 for $NPM_CONFIG_PREFIX, 10 for the npm
  # "prefix" config.
  local NVM_DELETE_PREFIX
  NVM_DELETE_PREFIX="$1"
  case "$NVM_DELETE_PREFIX" in
    0|1) ;;
    *)
      echo >&2 'First argument "delete the prefix" must be zero or one'
      return 1
    ;;
  esac
  local NVM_COMMAND
  NVM_COMMAND="$2"
  if [ -z "$NVM_COMMAND" ]; then
    echo >&2 'Second argument "nvm command" must be nonempty'
    return 2
  fi
  if [ -n "${PREFIX-}" ] && ! (nvm_tree_contains_path "$NVM_DIR" "$PREFIX" >/dev/null 2>&1); then
    nvm deactivate >/dev/null 2>&1
    echo >&2 "nvm is not compatible with the \"PREFIX\" environment variable: currently set to \"$PREFIX\""
    echo >&2 "Run \`unset PREFIX\` to unset it."
    return 3
  fi
  if [ -n "${NPM_CONFIG_PREFIX-}" ] && ! (nvm_tree_contains_path "$NVM_DIR" "$NPM_CONFIG_PREFIX" >/dev/null 2>&1); then
    nvm deactivate >/dev/null 2>&1
    echo >&2 "nvm is not compatible with the \"NPM_CONFIG_PREFIX\" environment variable: currently set to \"$NPM_CONFIG_PREFIX\""
    echo >&2 "Run \`unset NPM_CONFIG_PREFIX\` to unset it."
    return 4
  fi
  # Without npm there is nothing further to check.
  if ! nvm_has 'npm'; then
    return
  fi
  local NVM_NPM_PREFIX
  NVM_NPM_PREFIX="$(NPM_CONFIG_LOGLEVEL=warn npm config get prefix)"
  if ! (nvm_tree_contains_path "$NVM_DIR" "$NVM_NPM_PREFIX" >/dev/null 2>&1); then
    if [ "_$NVM_DELETE_PREFIX" = "_1" ]; then
      NPM_CONFIG_LOGLEVEL=warn npm config delete prefix
    else
      nvm deactivate >/dev/null 2>&1
      echo >&2 "nvm is not compatible with the npm config \"prefix\" option: currently set to \"$NVM_NPM_PREFIX\""
      # npm is guaranteed present here (checked above), so the previous
      # inner `nvm_has 'npm'` re-check was dead code and has been removed.
      echo >&2 "Run \`npm config delete prefix\` or \`$NVM_COMMAND\` to unset it."
      return 10
    fi
  fi
}
# Succeeds if $IOJS_VERSION represents an io.js version that has a
# Solaris binary, fails otherwise.
# Currently, only io.js 3.3.1 has a Solaris binary available, and it's the
# latest io.js version available. The expectation is that any potential io.js
# version later than v3.3.1 will also have Solaris binaries.
iojs_version_has_solaris_binary() {
  local IOJS_VERSION
  IOJS_VERSION="$1"
  local STRIPPED_IOJS_VERSION
  STRIPPED_IOJS_VERSION="$(nvm_strip_iojs_prefix "$IOJS_VERSION")"
  # If stripping the "iojs-" prefix changes nothing, this is not an io.js
  # version at all. Both sides of the comparison must carry the "_" guard
  # prefix; the original omitted it on the right-hand side, which made this
  # guard always false (bare node versions slipped through).
  if [ "_$STRIPPED_IOJS_VERSION" = "_$IOJS_VERSION" ]; then
    return 1
  fi
  # io.js started shipping Solaris binaries with io.js v3.3.1
  nvm_version_greater_than_or_equal_to "$STRIPPED_IOJS_VERSION" v3.3.1
}
# Succeeds if $NODE_VERSION represents a node version that has a
# Solaris binary, fails otherwise.
# Currently, node versions starting from v0.8.6 have a Solaris binary
# available.
node_version_has_solaris_binary() {
  local REQUESTED_VERSION
  REQUESTED_VERSION="$1"
  # Reject io.js-prefixed versions outright: if stripping the prefix
  # changes the string, it was an io.js version.
  local WITHOUT_IOJS_PREFIX
  WITHOUT_IOJS_PREFIX="$(nvm_strip_iojs_prefix "$REQUESTED_VERSION")"
  if [ "_$WITHOUT_IOJS_PREFIX" != "_$REQUESTED_VERSION" ]; then
    return 1
  fi
  # node (unmerged) started shipping Solaris binaries with v0.8.6.
  if ! nvm_version_greater_than_or_equal_to "$REQUESTED_VERSION" v0.8.6; then
    return 1
  fi
  # Versions v1.0.0 or greater are not valid "unmerged" node versions.
  if nvm_version_greater_than_or_equal_to "$REQUESTED_VERSION" v1.0.0; then
    return 1
  fi
  return 0
}
# Succeeds if $VERSION represents a version (node, io.js or merged) that has a
# Solaris binary, fails otherwise.
nvm_has_solaris_binary() {
  local REQUESTED
  REQUESTED="$1"
  # Merged (>= v4.0) releases always ship a Solaris binary.
  if nvm_is_merged_node_version "$REQUESTED"; then
    return 0
  fi
  # Otherwise delegate to the flavor-specific check.
  if nvm_is_iojs_version "$REQUESTED"; then
    iojs_version_has_solaris_binary "$REQUESTED"
  else
    node_version_has_solaris_binary "$REQUESTED"
  fi
}
nvm_sanitize_path() {
  # Replace the literal $NVM_DIR and $HOME values in a path with their
  # variable names, so debug output does not leak user-specific paths.
  local CLEANED_PATH
  CLEANED_PATH="$1"
  # When the path IS exactly $NVM_DIR, skip that substitution so the value
  # is reported relative to $HOME instead.
  if [ "_$CLEANED_PATH" != "_$NVM_DIR" ]; then
    CLEANED_PATH="$(echo "$CLEANED_PATH" | command sed "s#$NVM_DIR#\$NVM_DIR#g")"
  fi
  echo "$CLEANED_PATH" | command sed "s#$HOME#\$HOME#g"
}
nvm_is_natural_num() {
  # Succeeds only for positive integers (1, 2, ...).
  # Return codes: 4 for empty input, 1 for zero, 3 for negative input,
  # and the integer-comparison status (2) for non-numeric input.
  if [ -z "$1" ]; then
    return 4
  fi
  if [ "$1" = "0" ]; then
    return 1
  fi
  case "$1" in
    -*) return 3 ;; # some BSDs return false positives for double-negated args
  esac
  # `-eq` against itself succeeds only when the value parses as an integer;
  # the parse-error message is suppressed.
  [ "$1" -eq "$1" ] 2> /dev/null
}
# Check version dir permissions
# Recursively verify that every file under $1 is writable by the current
# user. Returns 0 when everything is writable, 1 when a file is not, and
# 2 when a nested directory fails the check.
nvm_check_file_permissions() {
  local FILE
  # Quote "$1": an unquoted path with spaces/IFS characters would be
  # word-split before globbing, silently skipping the directory contents.
  # (Glob results themselves are never re-split, so the loop stays correct.)
  for FILE in "$1"/* "$1"/.[!.]* "$1"/..?* ; do
    if [ -d "$FILE" ]; then
      if ! nvm_check_file_permissions "$FILE"; then
        return 2
      fi
    elif [ -e "$FILE" ] && [ ! -w "$FILE" ]; then
      return 1
    fi
  done
  return 0
}
nvm() {
if [ $# -lt 1 ]; then
nvm help
return
fi
local GREP_OPTIONS
GREP_OPTIONS=''
# initialize local variables
local VERSION
local ADDITIONAL_PARAMETERS
local ALIAS
case $1 in
"help" )
local NVM_IOJS_PREFIX
NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
local NVM_NODE_PREFIX
NVM_NODE_PREFIX="$(nvm_node_prefix)"
echo
echo "Node Version Manager"
echo
echo 'Note: <version> refers to any version-like string nvm understands. This includes:'
echo ' - full or partial version numbers, starting with an optional "v" (0.10, v0.1.2, v1)'
echo " - default (built-in) aliases: $NVM_NODE_PREFIX, stable, unstable, $NVM_IOJS_PREFIX, system"
echo ' - custom aliases you define with `nvm alias foo`'
echo
echo 'Usage:'
echo ' nvm help Show this message'
echo ' nvm --version Print out the latest released version of nvm'
echo ' nvm install [-s] <version> Download and install a <version>, [-s] from source. Uses .nvmrc if available'
echo ' --reinstall-packages-from=<version> When installing, reinstall packages installed in <node|iojs|node version number>'
echo ' nvm uninstall <version> Uninstall a version'
echo ' nvm use [--silent] <version> Modify PATH to use <version>. Uses .nvmrc if available'
echo ' nvm exec [--silent] <version> [<command>] Run <command> on <version>. Uses .nvmrc if available'
echo ' nvm run [--silent] <version> [<args>] Run `node` on <version> with <args> as arguments. Uses .nvmrc if available'
echo ' nvm current Display currently activated version'
echo ' nvm ls List installed versions'
echo ' nvm ls <version> List versions matching a given description'
echo ' nvm ls-remote List remote versions available for install'
echo ' nvm version <version> Resolve the given description to a single local version'
echo ' nvm version-remote <version> Resolve the given description to a single remote version'
echo ' nvm deactivate Undo effects of `nvm` on current shell'
echo ' nvm alias [<pattern>] Show all aliases beginning with <pattern>'
echo ' nvm alias <name> <version> Set an alias named <name> pointing to <version>'
echo ' nvm unalias <name> Deletes the alias named <name>'
echo ' nvm reinstall-packages <version> Reinstall global `npm` packages contained in <version> to current version'
echo ' nvm unload Unload `nvm` from shell'
echo ' nvm which [<version>] Display path to installed node version. Uses .nvmrc if available'
echo
echo 'Example:'
echo ' nvm install v0.10.32 Install a specific version number'
echo ' nvm use 0.10 Use the latest available 0.10.x release'
echo ' nvm run 0.10.32 app.js Run app.js using node v0.10.32'
echo ' nvm exec 0.10.32 node app.js Run `node app.js` with the PATH pointing to node v0.10.32'
echo ' nvm alias default 0.10.32 Set default node version on a shell'
echo
echo 'Note:'
echo ' to remove, delete, or uninstall nvm - just remove the `$NVM_DIR` folder (usually `~/.nvm`)'
echo
;;
"debug" )
local ZHS_HAS_SHWORDSPLIT_UNSET
ZHS_HAS_SHWORDSPLIT_UNSET=1
if nvm_has "setopt"; then
ZHS_HAS_SHWORDSPLIT_UNSET=$(setopt | command grep shwordsplit > /dev/null ; echo $?)
setopt shwordsplit
fi
echo >&2 "nvm --version: v$(nvm --version)"
echo >&2 "\$SHELL: $SHELL"
echo >&2 "\$HOME: $HOME"
echo >&2 "\$NVM_DIR: '$(nvm_sanitize_path "$NVM_DIR")'"
echo >&2 "\$PREFIX: '$(nvm_sanitize_path "$PREFIX")'"
echo >&2 "\$NPM_CONFIG_PREFIX: '$(nvm_sanitize_path "$NPM_CONFIG_PREFIX")'"
local NVM_DEBUG_OUTPUT
for NVM_DEBUG_COMMAND in 'nvm current' 'which node' 'which iojs' 'which npm' 'npm config get prefix' 'npm root -g'
do
NVM_DEBUG_OUTPUT="$($NVM_DEBUG_COMMAND 2>&1)"
echo >&2 "$NVM_DEBUG_COMMAND: $(nvm_sanitize_path "$NVM_DEBUG_OUTPUT")"
done
if [ "$ZHS_HAS_SHWORDSPLIT_UNSET" -eq 1 ] && nvm_has "unsetopt"; then
unsetopt shwordsplit
fi
return 42
;;
"install" | "i" )
local version_not_provided
version_not_provided=0
local NVM_OS
NVM_OS="$(nvm_get_os)"
if ! nvm_has "curl" && ! nvm_has "wget"; then
echo 'nvm needs curl or wget to proceed.' >&2;
return 1
fi
if [ $# -lt 2 ]; then
version_not_provided=1
nvm_rc_version
if [ -z "$NVM_RC_VERSION" ]; then
>&2 nvm help
return 127
fi
fi
shift
local nobinary
nobinary=0
local make_jobs
while [ $# -ne 0 ]
do
case "$1" in
-s)
shift # consume "-s"
nobinary=1
;;
-j)
shift # consume "-j"
nvm_get_make_jobs "$1"
shift # consume job count
;;
*)
break # stop parsing args
;;
esac
done
local provided_version
provided_version="$1"
if [ -z "$provided_version" ]; then
if [ $version_not_provided -ne 1 ]; then
nvm_rc_version
fi
provided_version="$NVM_RC_VERSION"
else
shift
fi
VERSION="$(nvm_remote_version "$provided_version")"
if [ "_$VERSION" = "_N/A" ]; then
echo "Version '$provided_version' not found - try \`nvm ls-remote\` to browse available versions." >&2
return 3
fi
ADDITIONAL_PARAMETERS=''
local PROVIDED_REINSTALL_PACKAGES_FROM
local REINSTALL_PACKAGES_FROM
while [ $# -ne 0 ]
do
case "$1" in
--reinstall-packages-from=*)
PROVIDED_REINSTALL_PACKAGES_FROM="$(echo "$1" | command cut -c 27-)"
REINSTALL_PACKAGES_FROM="$(nvm_version "$PROVIDED_REINSTALL_PACKAGES_FROM")"
;;
--copy-packages-from=*)
PROVIDED_REINSTALL_PACKAGES_FROM="$(echo "$1" | command cut -c 22-)"
REINSTALL_PACKAGES_FROM="$(nvm_version "$PROVIDED_REINSTALL_PACKAGES_FROM")"
;;
*)
ADDITIONAL_PARAMETERS="$ADDITIONAL_PARAMETERS $1"
;;
esac
shift
done
if [ "_$(nvm_ensure_version_prefix "$PROVIDED_REINSTALL_PACKAGES_FROM")" = "_$VERSION" ]; then
echo "You can't reinstall global packages from the same version of node you're installing." >&2
return 4
elif [ ! -z "$PROVIDED_REINSTALL_PACKAGES_FROM" ] && [ "_$REINSTALL_PACKAGES_FROM" = "_N/A" ]; then
echo "If --reinstall-packages-from is provided, it must point to an installed version of node." >&2
return 5
fi
local NVM_NODE_MERGED
local NVM_IOJS
if nvm_is_iojs_version "$VERSION"; then
NVM_IOJS=true
elif nvm_is_merged_node_version "$VERSION"; then
NVM_NODE_MERGED=true
fi
local VERSION_PATH
VERSION_PATH="$(nvm_version_path "$VERSION")"
if [ -d "$VERSION_PATH" ]; then
echo "$VERSION is already installed." >&2
if nvm use "$VERSION" && [ ! -z "$REINSTALL_PACKAGES_FROM" ] && [ "_$REINSTALL_PACKAGES_FROM" != "_N/A" ]; then
nvm reinstall-packages "$REINSTALL_PACKAGES_FROM"
fi
nvm_ensure_default_set "$provided_version"
return $?
fi
if [ "_$NVM_OS" = "_freebsd" ]; then
# node.js and io.js do not have a FreeBSD binary
nobinary=1
echo "Currently, there is no binary for $NVM_OS" >&2
elif [ "_$NVM_OS" = "_sunos" ]; then
# Not all node/io.js versions have a Solaris binary
if ! nvm_has_solaris_binary "$VERSION"; then
nobinary=1
echo "Currently, there is no binary of version $VERSION for $NVM_OS" >&2
fi
fi
local NVM_INSTALL_SUCCESS
# skip binary install if "nobinary" option specified.
if [ $nobinary -ne 1 ] && nvm_binary_available "$VERSION"; then
if [ "$NVM_IOJS" = true ] && nvm_install_iojs_binary std "$VERSION" "$REINSTALL_PACKAGES_FROM"; then
NVM_INSTALL_SUCCESS=true
elif [ "$NVM_NODE_MERGED" = true ] && nvm_install_merged_node_binary std "$VERSION" "$REINSTALL_PACKAGES_FROM"; then
NVM_INSTALL_SUCCESS=true
elif [ "$NVM_IOJS" != true ] && nvm_install_node_binary "$VERSION" "$REINSTALL_PACKAGES_FROM"; then
NVM_INSTALL_SUCCESS=true
fi
fi
if [ "$NVM_INSTALL_SUCCESS" != true ]; then
if [ -z "${NVM_MAKE_JOBS-}" ]; then
nvm_get_make_jobs
fi
if [ "$NVM_IOJS" != true ] && [ "$NVM_NODE_MERGED" != true ]; then
if nvm_install_node_source "$VERSION" "$NVM_MAKE_JOBS" "$ADDITIONAL_PARAMETERS"; then
NVM_INSTALL_SUCCESS=true
fi
elif [ "$NVM_IOJS" = true ]; then
# nvm_install_iojs_source "$VERSION" "$NVM_MAKE_JOBS" "$ADDITIONAL_PARAMETERS"
echo "Installing iojs from source is not currently supported" >&2
return 105
elif [ "$NVM_NODE_MERGED" = true ]; then
# nvm_install_merged_node_source "$VERSION" "$NVM_MAKE_JOBS" "$ADDITIONAL_PARAMETERS"
echo "Installing node v1.0 and greater from source is not currently supported" >&2
return 106
fi
fi
if [ "$NVM_INSTALL_SUCCESS" = true ] && nvm use "$VERSION"; then
nvm_ensure_default_set "$provided_version"
if [ ! -z "$REINSTALL_PACKAGES_FROM" ] \
&& [ "_$REINSTALL_PACKAGES_FROM" != "_N/A" ]; then
nvm reinstall-packages "$REINSTALL_PACKAGES_FROM"
fi
fi
return $?
;;
"uninstall" )
if [ $# -ne 2 ]; then
>&2 nvm help
return 127
fi
local PATTERN
PATTERN="$2"
case "_$PATTERN" in
"_$(nvm_iojs_prefix)" | "_$(nvm_iojs_prefix)-" \
| "_$(nvm_node_prefix)" | "_$(nvm_node_prefix)-")
VERSION="$(nvm_version "$PATTERN")"
;;
*)
VERSION="$(nvm_version "$PATTERN")"
;;
esac
if [ "_$VERSION" = "_$(nvm_ls_current)" ]; then
if nvm_is_iojs_version "$VERSION"; then
echo "nvm: Cannot uninstall currently-active io.js version, $VERSION (inferred from $PATTERN)." >&2
else
echo "nvm: Cannot uninstall currently-active node version, $VERSION (inferred from $PATTERN)." >&2
fi
return 1
fi
local VERSION_PATH
VERSION_PATH="$(nvm_version_path "$VERSION")"
if [ ! -d "$VERSION_PATH" ]; then
echo "$VERSION version is not installed..." >&2
return;
fi
t="$VERSION-$(nvm_get_os)-$(nvm_get_arch)"
local NVM_PREFIX
local NVM_SUCCESS_MSG
if nvm_is_iojs_version "$VERSION"; then
NVM_PREFIX="$(nvm_iojs_prefix)"
NVM_SUCCESS_MSG="Uninstalled io.js $(nvm_strip_iojs_prefix "$VERSION")"
else
NVM_PREFIX="$(nvm_node_prefix)"
NVM_SUCCESS_MSG="Uninstalled node $VERSION"
fi
if ! nvm_check_file_permissions "$VERSION_PATH"; then
>&2 echo 'Cannot uninstall, incorrect permissions on installation folder.'
>&2 echo 'This is usually caused by running `npm install -g` as root. Run the following command as root to fix the permissions and then try again.'
>&2 echo
>&2 echo " chown -R $(whoami) \"$VERSION_PATH\""
return 1
fi
# Delete all files related to target version.
command rm -rf "$NVM_DIR/src/$NVM_PREFIX-$VERSION" \
"$NVM_DIR/src/$NVM_PREFIX-$VERSION.tar.*" \
"$NVM_DIR/bin/$NVM_PREFIX-${t}" \
"$NVM_DIR/bin/$NVM_PREFIX-${t}.tar.*" \
"$VERSION_PATH" 2>/dev/null
echo "$NVM_SUCCESS_MSG"
# rm any aliases that point to uninstalled version.
for ALIAS in $(command grep -l "$VERSION" "$(nvm_alias_path)/*" 2>/dev/null)
do
nvm unalias "$(command basename "$ALIAS")"
done
;;
"deactivate" )
local NEWPATH
NEWPATH="$(nvm_strip_path "$PATH" "/bin")"
if [ "_$PATH" = "_$NEWPATH" ]; then
echo "Could not find $NVM_DIR/*/bin in \$PATH" >&2
else
export PATH="$NEWPATH"
hash -r
echo "$NVM_DIR/*/bin removed from \$PATH"
fi
if [ -n "${MANPATH-}" ]; then
NEWPATH="$(nvm_strip_path "$MANPATH" "/share/man")"
if [ "_$MANPATH" = "_$NEWPATH" ]; then
echo "Could not find $NVM_DIR/*/share/man in \$MANPATH" >&2
else
export MANPATH="$NEWPATH"
echo "$NVM_DIR/*/share/man removed from \$MANPATH"
fi
fi
if [ -n "${NODE_PATH-}" ]; then
NEWPATH="$(nvm_strip_path "$NODE_PATH" "/lib/node_modules")"
if [ "_$NODE_PATH" != "_$NEWPATH" ]; then
export NODE_PATH="$NEWPATH"
echo "$NVM_DIR/*/lib/node_modules removed from \$NODE_PATH"
fi
fi
unset NVM_BIN NVM_PATH
;;
"use" )
local PROVIDED_VERSION
local NVM_USE_SILENT
NVM_USE_SILENT=0
local NVM_DELETE_PREFIX
NVM_DELETE_PREFIX=0
shift # remove "use"
while [ $# -ne 0 ]
do
case "$1" in
--silent) NVM_USE_SILENT=1 ;;
--delete-prefix) NVM_DELETE_PREFIX=1 ;;
*)
if [ -n "$1" ]; then
PROVIDED_VERSION="$1"
fi
;;
esac
shift
done
if [ -z "$PROVIDED_VERSION" ]; then
nvm_rc_version
if [ -n "$NVM_RC_VERSION" ]; then
PROVIDED_VERSION="$NVM_RC_VERSION"
VERSION="$(nvm_version "$PROVIDED_VERSION")"
fi
else
VERSION="$(nvm_match_version "$PROVIDED_VERSION")"
fi
if [ -z "$VERSION" ]; then
>&2 nvm help
return 127
fi
if [ "_$VERSION" = '_system' ]; then
if nvm_has_system_node && nvm deactivate >/dev/null 2>&1; then
if [ $NVM_USE_SILENT -ne 1 ]; then
echo "Now using system version of node: $(node -v 2>/dev/null)$(nvm_print_npm_version)"
fi
return
elif nvm_has_system_iojs && nvm deactivate >/dev/null 2>&1; then
if [ $NVM_USE_SILENT -ne 1 ]; then
echo "Now using system version of io.js: $(iojs --version 2>/dev/null)$(nvm_print_npm_version)"
fi
return
else
if [ $NVM_USE_SILENT -ne 1 ]; then
echo "System version of node not found." >&2
fi
return 127
fi
elif [ "_$VERSION" = "_∞" ]; then
if [ $NVM_USE_SILENT -ne 1 ]; then
echo "The alias \"$PROVIDED_VERSION\" leads to an infinite loop. Aborting." >&2
fi
return 8
fi
# This nvm_ensure_version_installed call can be a performance bottleneck
# on shell startup. Perhaps we can optimize it away or make it faster.
nvm_ensure_version_installed "$PROVIDED_VERSION"
EXIT_CODE=$?
if [ "$EXIT_CODE" != "0" ]; then
return $EXIT_CODE
fi
local NVM_VERSION_DIR
NVM_VERSION_DIR="$(nvm_version_path "$VERSION")"
# Strip other version from PATH
PATH="$(nvm_strip_path "$PATH" "/bin")"
# Prepend current version
PATH="$(nvm_prepend_path "$PATH" "$NVM_VERSION_DIR/bin")"
if nvm_has manpath; then
if [ -z "$MANPATH" ]; then
MANPATH=$(manpath)
fi
# Strip other version from MANPATH
MANPATH="$(nvm_strip_path "$MANPATH" "/share/man")"
# Prepend current version
MANPATH="$(nvm_prepend_path "$MANPATH" "$NVM_VERSION_DIR/share/man")"
export MANPATH
fi
export PATH
hash -r
export NVM_PATH="$NVM_VERSION_DIR/lib/node"
export NVM_BIN="$NVM_VERSION_DIR/bin"
if [ "${NVM_SYMLINK_CURRENT-}" = true ]; then
command rm -f "$NVM_DIR/current" && ln -s "$NVM_VERSION_DIR" "$NVM_DIR/current"
fi
local NVM_USE_OUTPUT
if [ $NVM_USE_SILENT -ne 1 ]; then
if nvm_is_iojs_version "$VERSION"; then
NVM_USE_OUTPUT="Now using io.js $(nvm_strip_iojs_prefix "$VERSION")$(nvm_print_npm_version)"
else
NVM_USE_OUTPUT="Now using node $VERSION$(nvm_print_npm_version)"
fi
fi
if [ "_$VERSION" != "_system" ]; then
local NVM_USE_CMD
NVM_USE_CMD="nvm use --delete-prefix"
if [ -n "$PROVIDED_VERSION" ]; then
NVM_USE_CMD="$NVM_USE_CMD $VERSION"
fi
if [ $NVM_USE_SILENT -eq 1 ]; then
NVM_USE_CMD="$NVM_USE_CMD --silent"
fi
if ! nvm_die_on_prefix "$NVM_DELETE_PREFIX" "$NVM_USE_CMD"; then
return 11
fi
fi
if [ -n "$NVM_USE_OUTPUT" ]; then
echo "$NVM_USE_OUTPUT"
fi
;;
"run" )
local provided_version
local has_checked_nvmrc
has_checked_nvmrc=0
# run given version of node
shift
local NVM_SILENT
NVM_SILENT=0
if [ "_$1" = "_--silent" ]; then
NVM_SILENT=1
shift
fi
if [ $# -lt 1 ]; then
if [ "$NVM_SILENT" -eq 1 ]; then
nvm_rc_version >/dev/null 2>&1 && has_checked_nvmrc=1
else
nvm_rc_version && has_checked_nvmrc=1
fi
if [ -n "$NVM_RC_VERSION" ]; then
VERSION="$(nvm_version "$NVM_RC_VERSION")"
else
VERSION='N/A'
fi
if [ $VERSION = "N/A" ]; then
>&2 nvm help
return 127
fi
fi
provided_version="$1"
if [ -n "$provided_version" ]; then
VERSION="$(nvm_version "$provided_version")"
if [ "_$VERSION" = "_N/A" ] && ! nvm_is_valid_version "$provided_version"; then
provided_version=''
if [ $has_checked_nvmrc -ne 1 ]; then
if [ "$NVM_SILENT" -eq 1 ]; then
nvm_rc_version >/dev/null 2>&1 && has_checked_nvmrc=1
else
nvm_rc_version && has_checked_nvmrc=1
fi
fi
VERSION="$(nvm_version "$NVM_RC_VERSION")"
else
shift
fi
fi
local NVM_IOJS
if nvm_is_iojs_version "$VERSION"; then
NVM_IOJS=true
fi
local ARGS
ARGS="$@"
local OUTPUT
local EXIT_CODE
local ZHS_HAS_SHWORDSPLIT_UNSET
ZHS_HAS_SHWORDSPLIT_UNSET=1
if nvm_has "setopt"; then
ZHS_HAS_SHWORDSPLIT_UNSET=$(setopt | command grep shwordsplit > /dev/null ; echo $?)
setopt shwordsplit
fi
if [ "_$VERSION" = "_N/A" ]; then
nvm_ensure_version_installed "$provided_version"
EXIT_CODE=$?
elif [ -z "$ARGS" ]; then
if [ "$NVM_IOJS" = true ]; then
nvm exec "$VERSION" iojs
else
nvm exec "$VERSION" node
fi
EXIT_CODE="$?"
elif [ "$NVM_IOJS" = true ]; then
[ $NVM_SILENT -eq 1 ] || echo "Running io.js $(nvm_strip_iojs_prefix "$VERSION")$(nvm use --silent "$VERSION" && nvm_print_npm_version)"
OUTPUT="$(nvm use "$VERSION" >/dev/null && iojs $ARGS)"
EXIT_CODE="$?"
else
[ $NVM_SILENT -eq 1 ] || echo "Running node $VERSION$(nvm use --silent "$VERSION" && nvm_print_npm_version)"
OUTPUT="$(nvm use "$VERSION" >/dev/null && node $ARGS)"
EXIT_CODE="$?"
fi
if [ "$ZHS_HAS_SHWORDSPLIT_UNSET" -eq 1 ] && nvm_has "unsetopt"; then
unsetopt shwordsplit
fi
if [ -n "$OUTPUT" ]; then
echo "$OUTPUT"
fi
return $EXIT_CODE
;;
"exec" )
shift
local NVM_SILENT
NVM_SILENT=0
if [ "_$1" = "_--silent" ]; then
NVM_SILENT=1
shift
fi
local provided_version
provided_version="$1"
if [ -n "$provided_version" ]; then
VERSION="$(nvm_version "$provided_version")"
if [ "_$VERSION" = "_N/A" ] && ! nvm_is_valid_version "$provided_version"; then
if [ "$NVM_SILENT" -eq 1 ]; then
nvm_rc_version >/dev/null 2>&1
else
nvm_rc_version
fi
provided_version="$NVM_RC_VERSION"
VERSION="$(nvm_version "$provided_version")"
else
shift
fi
fi
nvm_ensure_version_installed "$provided_version"
EXIT_CODE=$?
if [ "$EXIT_CODE" != "0" ]; then
return $EXIT_CODE
fi
[ $NVM_SILENT -eq 1 ] || echo "Running node $VERSION$(nvm use --silent "$VERSION" && nvm_print_npm_version)"
NODE_VERSION="$VERSION" "$NVM_DIR/nvm-exec" "$@"
;;
"ls" | "list" )
local NVM_LS_OUTPUT
local NVM_LS_EXIT_CODE
NVM_LS_OUTPUT=$(nvm_ls "${2-}")
NVM_LS_EXIT_CODE=$?
nvm_print_versions "$NVM_LS_OUTPUT"
if [ $# -eq 1 ]; then
nvm alias
fi
return $NVM_LS_EXIT_CODE
;;
"ls-remote" | "list-remote" )
local PATTERN
PATTERN="${2-}"
local NVM_IOJS_PREFIX
NVM_IOJS_PREFIX="$(nvm_iojs_prefix)"
local NVM_NODE_PREFIX
NVM_NODE_PREFIX="$(nvm_node_prefix)"
local NVM_FLAVOR
case "_$PATTERN" in
"_$NVM_IOJS_PREFIX" | "_$NVM_NODE_PREFIX" )
NVM_FLAVOR="$PATTERN"
PATTERN="$3"
;;
esac
local NVM_LS_REMOTE_EXIT_CODE
NVM_LS_REMOTE_EXIT_CODE=0
local NVM_LS_REMOTE_PRE_MERGED_OUTPUT
NVM_LS_REMOTE_PRE_MERGED_OUTPUT=''
local NVM_LS_REMOTE_POST_MERGED_OUTPUT
NVM_LS_REMOTE_POST_MERGED_OUTPUT=''
if [ "_$NVM_FLAVOR" != "_$NVM_IOJS_PREFIX" ]; then
local NVM_LS_REMOTE_OUTPUT
NVM_LS_REMOTE_OUTPUT=$(nvm_ls_remote "$PATTERN")
# split output into two
NVM_LS_REMOTE_PRE_MERGED_OUTPUT="${NVM_LS_REMOTE_OUTPUT%%v4\.0\.0*}"
NVM_LS_REMOTE_POST_MERGED_OUTPUT="${NVM_LS_REMOTE_OUTPUT#$NVM_LS_REMOTE_PRE_MERGED_OUTPUT}"
NVM_LS_REMOTE_EXIT_CODE=$?
fi
local NVM_LS_REMOTE_IOJS_EXIT_CODE
NVM_LS_REMOTE_IOJS_EXIT_CODE=0
local NVM_LS_REMOTE_IOJS_OUTPUT
NVM_LS_REMOTE_IOJS_OUTPUT=''
if [ "_$NVM_FLAVOR" != "_$NVM_NODE_PREFIX" ]; then
NVM_LS_REMOTE_IOJS_OUTPUT=$(nvm_ls_remote_iojs "$PATTERN")
NVM_LS_REMOTE_IOJS_EXIT_CODE=$?
fi
local NVM_OUTPUT
NVM_OUTPUT="$(echo "$NVM_LS_REMOTE_PRE_MERGED_OUTPUT
$NVM_LS_REMOTE_IOJS_OUTPUT
$NVM_LS_REMOTE_POST_MERGED_OUTPUT" | command grep -v "N/A" | command sed '/^$/d')"
if [ -n "$NVM_OUTPUT" ]; then
nvm_print_versions "$NVM_OUTPUT"
return $NVM_LS_REMOTE_EXIT_CODE || $NVM_LS_REMOTE_IOJS_EXIT_CODE
else
nvm_print_versions "N/A"
return 3
fi
;;
"current" )
nvm_version current
;;
"which" )
local provided_version
provided_version="$2"
if [ $# -eq 1 ]; then
nvm_rc_version
if [ -n "$NVM_RC_VERSION" ]; then
provided_version="$NVM_RC_VERSION"
VERSION=$(nvm_version "$NVM_RC_VERSION")
fi
elif [ "_$2" != '_system' ]; then
VERSION="$(nvm_version "$provided_version")"
else
VERSION="$2"
fi
if [ -z "$VERSION" ]; then
>&2 nvm help
return 127
fi
if [ "_$VERSION" = '_system' ]; then
if nvm_has_system_iojs >/dev/null 2>&1 || nvm_has_system_node >/dev/null 2>&1; then
local NVM_BIN
NVM_BIN="$(nvm use system >/dev/null 2>&1 && command which node)"
if [ -n "$NVM_BIN" ]; then
echo "$NVM_BIN"
return
else
return 1
fi
else
echo "System version of node not found." >&2
return 127
fi
elif [ "_$VERSION" = "_∞" ]; then
echo "The alias \"$2\" leads to an infinite loop. Aborting." >&2
return 8
fi
nvm_ensure_version_installed "$provided_version"
EXIT_CODE=$?
if [ "$EXIT_CODE" != "0" ]; then
return $EXIT_CODE
fi
local NVM_VERSION_DIR
NVM_VERSION_DIR="$(nvm_version_path "$VERSION")"
echo "$NVM_VERSION_DIR/bin/node"
;;
"alias" )
local NVM_ALIAS_DIR
NVM_ALIAS_DIR="$(nvm_alias_path)"
command mkdir -p "$NVM_ALIAS_DIR"
if [ $# -le 2 ]; then
local DEST
for ALIAS_PATH in "$NVM_ALIAS_DIR"/"${2-}"*; do
ALIAS="$(command basename "$ALIAS_PATH")"
DEST="$(nvm_alias "$ALIAS" 2> /dev/null)"
if [ -n "$DEST" ]; then
VERSION="$(nvm_version "$DEST")"
if [ "_$DEST" = "_$VERSION" ]; then
echo "$ALIAS -> $DEST"
else
echo "$ALIAS -> $DEST (-> $VERSION)"
fi
fi
done
for ALIAS in "$(nvm_node_prefix)" "stable" "unstable" "$(nvm_iojs_prefix)"; do
if [ ! -f "$NVM_ALIAS_DIR/$ALIAS" ]; then
if [ $# -lt 2 ] || [ "~$ALIAS" = "~$2" ]; then
DEST="$(nvm_print_implicit_alias local "$ALIAS")"
if [ "_$DEST" != "_" ]; then
VERSION="$(nvm_version "$DEST")"
if [ "_$DEST" = "_$VERSION" ]; then
echo "$ALIAS -> $DEST (default)"
else
echo "$ALIAS -> $DEST (-> $VERSION) (default)"
fi
fi
fi
fi
done
return
fi
if [ -z "${3-}" ]; then
command rm -f "$NVM_ALIAS_DIR/$2"
echo "$2 -> *poof*"
return
fi
VERSION="$(nvm_version "$3")"
if [ $? -ne 0 ]; then
echo "! WARNING: Version '$3' does not exist." >&2
fi
echo "$3" | tee "$NVM_ALIAS_DIR/$2" >/dev/null
if [ ! "_$3" = "_$VERSION" ]; then
echo "$2 -> $3 (-> $VERSION)"
else
echo "$2 -> $3"
fi
;;
"unalias" )
local NVM_ALIAS_DIR
NVM_ALIAS_DIR="$(nvm_alias_path)"
command mkdir -p "$NVM_ALIAS_DIR"
if [ $# -ne 2 ]; then
>&2 nvm help
return 127
fi
[ ! -f "$NVM_ALIAS_DIR/$2" ] && echo "Alias $2 doesn't exist!" >&2 && return
local NVM_ALIAS_ORIGINAL
NVM_ALIAS_ORIGINAL="$(nvm_alias "$2")"
command rm -f "$NVM_ALIAS_DIR/$2"
echo "Deleted alias $2 - restore it with \`nvm alias $2 "$NVM_ALIAS_ORIGINAL"\`"
;;
"reinstall-packages" | "copy-packages" )
if [ $# -ne 2 ]; then
>&2 nvm help
return 127
fi
local PROVIDED_VERSION
PROVIDED_VERSION="$2"
if [ "$PROVIDED_VERSION" = "$(nvm_ls_current)" ] || [ "$(nvm_version "$PROVIDED_VERSION")" = "$(nvm_ls_current)" ]; then
echo 'Can not reinstall packages from the current version of node.' >&2
return 2
fi
local VERSION
if [ "_$PROVIDED_VERSION" = "_system" ]; then
if ! nvm_has_system_node && ! nvm_has_system_iojs; then
echo 'No system version of node or io.js detected.' >&2
return 3
fi
VERSION="system"
else
VERSION="$(nvm_version "$PROVIDED_VERSION")"
fi
local NPMLIST
NPMLIST="$(nvm_npm_global_modules "$VERSION")"
local INSTALLS
local LINKS
INSTALLS="${NPMLIST%% //// *}"
LINKS="${NPMLIST##* //// }"
echo "Reinstalling global packages from $VERSION..."
echo "$INSTALLS" | command xargs npm install -g --quiet
echo "Linking global packages from $VERSION..."
set -f; IFS='
' # necessary to turn off variable expansion except for newlines
for LINK in $LINKS; do
set +f; unset IFS # restore variable expansion
if [ -n "$LINK" ]; then
(cd "$LINK" && npm link)
fi
done
set +f; unset IFS # restore variable expansion in case $LINKS was empty
;;
"clear-cache" )
command rm -f "$NVM_DIR/v*" "$(nvm_version_dir)" 2>/dev/null
echo "Cache cleared."
;;
"version" )
nvm_version "$2"
;;
"version-remote" )
nvm_remote_version "$2"
;;
"--version" )
echo "0.31.0"
;;
"unload" )
unset -f nvm nvm_print_versions nvm_checksum \
nvm_iojs_prefix nvm_node_prefix \
nvm_add_iojs_prefix nvm_strip_iojs_prefix \
nvm_is_iojs_version nvm_is_alias \
nvm_ls_remote nvm_ls_remote_iojs nvm_ls_remote_index_tab \
nvm_ls nvm_remote_version nvm_remote_versions \
nvm_install_iojs_binary nvm_install_node_binary \
nvm_install_node_source nvm_check_file_permissions \
nvm_version nvm_rc_version nvm_match_version \
nvm_ensure_default_set nvm_get_arch nvm_get_os \
nvm_print_implicit_alias nvm_validate_implicit_alias \
nvm_resolve_alias nvm_ls_current nvm_alias \
nvm_binary_available nvm_prepend_path nvm_strip_path \
nvm_num_version_groups nvm_format_version nvm_ensure_version_prefix \
nvm_normalize_version nvm_is_valid_version \
nvm_ensure_version_installed \
nvm_version_path nvm_alias_path nvm_version_dir \
nvm_find_nvmrc nvm_find_up nvm_tree_contains_path \
nvm_version_greater nvm_version_greater_than_or_equal_to \
nvm_print_npm_version nvm_npm_global_modules \
nvm_has_system_node nvm_has_system_iojs \
nvm_download nvm_get_latest nvm_has nvm_get_latest \
nvm_supports_source_options nvm_auto nvm_supports_xz \
nvm_process_parameters > /dev/null 2>&1
unset RC_VERSION NVM_NODEJS_ORG_MIRROR NVM_DIR NVM_CD_FLAGS > /dev/null 2>&1
;;
* )
>&2 nvm help
return 127
;;
esac
}
# Detect whether this shell forwards positional parameters to sourced scripts
# (i.e. `. script args...` makes the args visible as $1, $2, ...).
# A tiny probe is sourced from /dev/stdin with the argument "yes"; if the
# probe can see it as $1, the feature is supported.
nvm_supports_source_options() {
  # The "_" prefixes on both sides guard against an empty expansion.
  [ "_$(echo '[ $# -gt 0 ] && echo $1' | . /dev/stdin yes 2> /dev/null)" = "_yes" ]
}
# Whether an xz-compressed tarball can be used for the given version:
# requires the `xz` tool to be available and the version to be >= 2.3.2.
# $1 - version string being installed
nvm_supports_xz() {
  # `command -v` is a shell builtin, unlike the external `which` utility,
  # so this works even on systems where `which` is missing or nonstandard.
  command -v xz >/dev/null 2>&1 && nvm_version_greater_than_or_equal_to "$1" "2.3.2"
}
# Perform the automatic action requested when nvm.sh is sourced:
#   install - install the default alias (or the .nvmrc version) if one exists
#   use     - silently activate the default alias (or the .nvmrc version)
#   none    - do nothing
# Any other mode is an error.
# $1 - mode: install | use | none
nvm_auto() {
  local NVM_MODE
  NVM_MODE="${1-}"
  local VERSION
  case "_$NVM_MODE" in
    '_install')
      VERSION="$(nvm_alias default 2>/dev/null || echo)"
      if [ -n "$VERSION" ]; then
        nvm install "$VERSION" >/dev/null
      elif nvm_rc_version >/dev/null 2>&1; then
        nvm install >/dev/null
      fi
    ;;
    '_use')
      VERSION="$(nvm_alias default 2>/dev/null || echo)"
      if [ -n "$VERSION" ]; then
        nvm use --silent "$VERSION" >/dev/null
      elif nvm_rc_version >/dev/null 2>&1; then
        nvm use --silent >/dev/null
      fi
    ;;
    '_none')
      # explicit no-op
    ;;
    *)
      echo >&2 'Invalid auto mode supplied.'
      return 1
    ;;
  esac
}
# Interpret the options passed when nvm.sh itself is sourced and hand the
# resulting mode to nvm_auto.  Recognized flags: --install, --no-use.
# Default mode is "use".
nvm_process_parameters() {
  local NVM_AUTO_MODE
  NVM_AUTO_MODE='use'
  # Arguments are only visible here on shells that forward them to sourced
  # scripts; skip parsing entirely elsewhere.
  if nvm_supports_source_options; then
    local NVM_PARAM
    for NVM_PARAM in "$@"; do
      case "$NVM_PARAM" in
        --install) NVM_AUTO_MODE='install' ;;
        --no-use) NVM_AUTO_MODE='none' ;;
      esac
    done
  fi
  nvm_auto "$NVM_AUTO_MODE"
}
nvm_process_parameters "$@"
} # this ensures the entire script is downloaded #
|
Theodeus/nvm
|
nvm.sh
|
Shell
|
mit
| 73,708 |
#!/bin/bash
# Keep the MobileReader process running: restart it whenever it exits,
# pausing briefly between restarts to avoid a tight crash loop.
while :
do
	python /home/chip/datakamp/MobileReader.py
	# Report the exit immediately (the original slept first, so the
	# message showed up 4 seconds after the actual crash).
	echo "Python crashed"
	sleep 4
done
|
kaosbeat/datakamp
|
startup.sh
|
Shell
|
mit
| 103 |
#!/bin/bash
# Remove these two incriminating pictures.... for now!
# -f keeps the script quiet (and successful) when the files are already gone,
# so re-running it is harmless; -- guards against paths parsed as options.
rm -f -- /home/paus/www/photos/2012/02/thumbs/2012\:02\:04-22\:11\:*
rm -f -- /home/paus/www/photos/2012/02/images/2012\:02\:04-22\:11\:*
|
cpausmit/Config
|
bin/photos/removeYes.sh
|
Shell
|
mit
| 193 |
#!/bin/sh
# as root
# Reset the Apache logs.  Note `echo >` leaves a single blank line in each
# file rather than truncating it to zero bytes.
echo > /var/log/httpd/error_log
echo > /var/log/httpd/access_log
|
Fornost461/drafts-and-stuff
|
web/PHP-SQL/config files/logs/empty.sh
|
Shell
|
cc0-1.0
| 87 |
#!/bin/bash
# Check whether museudoazulejo.gov.pt still serves Flash content
# (x-shockwave-flash in the page); if it does, stamp today's date into the
# site's row of README.md.
if [ "$(wget http://www.museudoazulejo.gov.pt/ -o /dev/null -O -|grep -i x-shockwave-flash -c)" -eq "0" ]; then
	echo "azulejo: incumprimento pode já não existir";
else
	echo "azulejo: Incumprimento mantém-se, a actualizar o README (faça um git diff, valide, e commit!)";
	# Rewrite README.md line by line: a line mentioning "azulejo" has its
	# 5th |-separated field replaced by today's date; all other lines pass
	# through unchanged.
	while IFS='' read -r line || [[ -n "$line" ]]; do
		test $(echo "$line"|grep -v azulejo|wc -l) -eq "1" \
			&& echo "$line" \
			|| (h=$(echo "$line"|cut -d\| -f1-4); t=$(echo "$line"|cut -d\| -f6-); echo "$h| $(date +%Y/%m/%d) |$t");
	done < README.md > new
	mv new README.md
fi
|
marado/RNID
|
scripts/22-azulejo.sh
|
Shell
|
cc0-1.0
| 570 |
#!/usr/bin/env bash
#
#loggertoken="a5d2fd23" ; source logger.sh ; # paste into script & uncomment
callhelp="$0 must be called using the one-liner command contained within."
#
# Summary
# Once loaded, provides the sourcing script with logging functions custom
# to the sourcerer.
# Note, logger.sh overrides 'echo' and 'printf' commands.
#
# Usage
# Add the single line (see the top of this script) to enable.
# This script must be on the PATH variable if called directly.
#
# Author
# James Knight
# jknightdev.com
# [email protected]
# @JamesKnight1337
#
# Development
# TODO: add verbosity to control what gets output to terminal. Log file should remain untouched.
#
# ---------------------------------------------------------------------80char\
# $me is the sourcing script's basename; it keys the per-script log file name.
me=`basename "$0"`
# This token being set in the calling script will be enough proof the script was
# sourced properly and so shares the same variable scope. Defensive bashing.
if [ "${loggertoken}" != "a5d2fd23" ]; then
  # the script was not sourced correctly
  echo "${callhelp}"
  exit 1;
fi
# make sure this script wasn't called directly from the terminal (not supported)
# (when sourced at an interactive prompt, basename of $0 is "bash")
if [ "${me}" == "bash" ] ; then
  echo "logging from basename=\"bash\" not supported."
  return 1;
fi
echo "target log file, path=\"${HOME}/${me}.params.log\""
# Shadow the `echo` builtin: print the message to the terminal and append an
# identical copy to the sourcerer's log file ($HOME/<script>.params.log).
# Fixed: forward every argument (and flags such as -n), not just the first.
function echo (){
  command echo "$@"
  command echo "$@" >> "${HOME}/${me}.params.log"
}
# $1 = full path of the file you want to pipe through to your log file.
function logcat () {
cat $1
cat $1 >> "${HOME}/${me}.params.log"
}
# Shadow the `printf` builtin: render to the terminal and append the same
# output to the sourcerer's log file.
# Fixed: forward all arguments, so `printf '%s\n' value` substitutes the
# value instead of silently dropping everything after the format string.
function printf (){
  command printf "$@"
  command printf "$@" >> "${HOME}/${me}.params.log"
}
|
jameswilliamknight/scripts.pub
|
Bash/provision/logger.sh
|
Shell
|
cc0-1.0
| 1,787 |
#!/bin/bash
###
# #%L
# che-starter
# %%
# Copyright (C) 2017 Red Hat, Inc.
# %%
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# #L%
###
eval "$(./env-toolkit load -f jenkins-env.json \
DEVSHIFT_TAG_LEN \
RHCHEBOT_DOCKER_HUB_PASSWORD \
GIT_COMMIT \
KEYCLOAK_TOKEN \
QUAY_USERNAME \
QUAY_PASSWORD)"
yum -y update
yum -y install epel-release
yum -y install centos-release-scl java-1.8.0-openjdk-devel docker curl jq
yum -y install rh-maven33
# TARGET variable gives ability to switch context for building rhel based images, default is "centos"
# If CI slave is configured with TARGET="rhel" RHEL based images should be generated then.
TARGET=${TARGET:-"centos"}
# Keycloak token provided by `che_functional_tests_credentials_wrapper` from `openshiftio-cico-jobs` is a refresh token.
# Obtaining osio user token
AUTH_RESPONSE=$(curl -H "Content-Type: application/json" -X POST -d '{"refresh_token":"'$KEYCLOAK_TOKEN'"}' https://auth.prod-preview.openshift.io/api/token/refresh)
# `OSIO_USER_TOKEN` is used for che-starter integration tests which are run against prod-preview
export OSIO_USER_TOKEN=$(echo $AUTH_RESPONSE | jq --raw-output ".token | .access_token")
systemctl start docker
scl enable rh-maven33 'mvn clean verify -B'
if [ $? -eq 0 ]; then
export PROJECT_VERSION=`mvn -o help:evaluate -Dexpression=project.version | grep -e '^[[:digit:]]'`
if [ $TARGET == "rhel" ]; then
DOCKERFILE="Dockerfile.rhel"
IMAGE_URL="quay.io/openshiftio/rhel-almighty-che-starter"
else
DOCKERFILE="Dockerfile"
IMAGE_URL="quay.io/openshiftio/almighty-che-starter"
fi
if [ -n "${QUAY_USERNAME}" -a -n "${QUAY_PASSWORD}" ]; then
docker login -u ${QUAY_USERNAME} -p ${QUAY_PASSWORD} quay.io
else
echo "Could not login, missing credentials for the registry"
fi
docker build -t rhche/che-starter:latest -f ${DOCKERFILE} .
if [ $? -ne 0 ]; then
echo 'Docker Build Failed!'
exit 2
fi
TAG=$(echo $GIT_COMMIT | cut -c1-${DEVSHIFT_TAG_LEN})
#push to docker.io ONLY if not RHEL
if [ $TARGET != "rhel" ]; then
docker login -u rhchebot -p $RHCHEBOT_DOCKER_HUB_PASSWORD -e [email protected]
docker tag rhche/che-starter:latest rhche/che-starter:$TAG
docker push rhche/che-starter:latest
docker push rhche/che-starter:$TAG
fi
docker tag rhche/che-starter:latest ${IMAGE_URL}:$TAG
docker push ${IMAGE_URL}:$TAG
docker tag rhche/che-starter:latest ${IMAGE_URL}:latest
docker push ${IMAGE_URL}:latest
else
echo 'Build Failed!'
exit 1
fi
|
redhat-developer/che-starter
|
cico_build_deploy.sh
|
Shell
|
epl-1.0
| 2,772 |
#! /bin/bash
#############################################################################
# Copyright (c) 2003-2005,2007-2009 Novell, Inc.
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact Novell, Inc.
#
# To contact Novell about this file by physical or electronic mail,
# you may find current contact information at www.novell.com
#############################################################################
source $(dirname $0)/../rpm/config.sh
source $(dirname $0)/wd-functions.sh
set -o pipefail
have_arch_patches=false
fuzz="-F0"
case "$IBS_PROJECT" in
SUSE:SLE-9*)
fuzz=
have_arch_patches=true
esac
# Print the full help text to stdout and terminate with status 1.
# The heredoc is runtime output; $0 expands, escaped \$ names are literal.
usage() {
    cat <<END
SYNOPSIS: $0 [-qv] [--symbol=...] [--dir=...]
          [--fast] [last-patch-name] [--vanilla] [--fuzz=NUM]
          [--patch-dir=PATH] [--build-dir=PATH] [--config=ARCH-FLAVOR [--kabi]]
          [--ctags] [--cscope] [--no-xen] [--skip-reverse]

  The --build-dir option supports internal shell aliases, like ~, and variable
  expansion when the variables are properly escaped.  Environment variables
  and the following list of internal variables are permitted:
  \$PATCH_DIR:		The expanded source tree
  \$SRCVERSION:		The current linux source tarball version
  \$TAG:			The current tag or branch of this repo
  \$EXT:			A string expanded from current \$EXTRA_SYMBOLS
  With --config=ARCH-FLAVOR, these have values.  Otherwise they are empty.
  \$CONFIG:		The current ARCH-FLAVOR.
  \$CONFIG_ARCH:		The current ARCH.
  \$CONFIG_FLAVOR:	The current FLAVOR.

  The --no-quilt option will still create quilt-style backups for each
  file that is modified but the backups will be removed if the patch
  is successful. This can be fast because the new files may be created
  and removed before writeback occurs so they only exist in memory. A
  failed patch will be rolled back and the caller will be able to diagnose it.

  The --fast option will concatenate all the patches to be applied and
  call patch just once. This is even faster than --no-quilt but if any
  of the component patches fail to apply the tree will not be rolled
  back.

  When used with last-patch-name or --no-xen, both --fast and --no-quilt
  will set up a quilt environment for the remaining patches.
END
    exit 1
}
# Apply all patches in PATCHES_BEFORE with a single `patch` invocation by
# concatenating them on stdin.  Much faster than one run per patch, but a
# failure leaves the tree partially patched (no rollback).
# Reads globals: PATCHES_BEFORE, PATCHES_AFTER, PATCH_DIR, fuzz, PATCH_LOG,
# QUIET.  Writes: LAST_LOG, STATUS, status, PATCHES.
apply_fast_patches() {
    echo "[ Fast-applying ${#PATCHES_BEFORE[@]} patches. ${#PATCHES_AFTER[@]} remain. ]"
    LAST_LOG=$(cat "${PATCHES_BEFORE[@]}" | \
	patch -d $PATCH_DIR -p1 -E $fuzz --force --no-backup-if-mismatch \
		-s 2>&1)
    STATUS=$?
    if [ $STATUS -ne 0 ]; then
	echo "$LAST_LOG" >> $PATCH_LOG
	[ -n "$QUIET" ] && echo "$LAST_LOG"
	echo "All-in-one patch failed (not rolled back)."
	echo "Logfile: $PATCH_LOG"
	status=1
    fi
    # Whatever was not fast-applied becomes the remaining work list.
    PATCHES=( ${PATCHES_AFTER[@]} )
}
# Accumulates patches that were skipped because they were already applied
# upstream (detected via reverse-apply); reported by show_skipped at the end.
SKIPPED_PATCHES=

# Patch kernel normally
# Apply each patch in PATCHES one at a time with per-patch backup and
# rollback.  Optionally steps interactively once $LIMIT is reached (non-quilt
# mode), records applied patches for quilt, and skips patches that can be
# reverse-applied when SKIP_REVERSE is enabled.
# Reads globals: PATCHES, QUILT, LIMIT, fuzz, PATCH_DIR, PATCH_LOG, QUIET,
# SKIP_REVERSE, SERIES_PFX.  Writes: PATCH, LAST_LOG, STATUS, status,
# SKIPPED_PATCHES, PATCHES (consumed entries are unset in quilt mode).
apply_patches() {
    set -- "${PATCHES[@]}"
    n=0
    while [ $# -gt 0 ]; do
	PATCH="$1"
	if ! $QUILT && test "$PATCH" = "$LIMIT"; then
	    STEP_BY_STEP=1
	    echo "Stopping before $PATCH"
	fi
	if [ -n "$STEP_BY_STEP" ]; then
	    while true; do
		echo -n "Continue ([y]es/[n]o/yes to [a]ll)?"
		read YESNO
		case $YESNO in
		    ([yYjJsS])
			break
			;;
		    ([nN])
			break 2	# break out of outer loop
			;;
		    ([aA])
			unset STEP_BY_STEP
			break
			;;
		esac
	    done
	fi

	if [ ! -r "$PATCH" ]; then
	    echo "Patch $PATCH not found."
	    status=1
	    break
	fi
	echo "[ $PATCH ]"
	echo "[ $PATCH ]" >> $PATCH_LOG
	backup_dir=$PATCH_DIR/.pc/$PATCH

	LAST_LOG=$(patch -d $PATCH_DIR --backup --prefix=$backup_dir/ -p1 -E $fuzz \
		--no-backup-if-mismatch --force < $PATCH 2>&1)
	STATUS=$?
	if [ $STATUS -ne 0 ]; then
	    # Failed hunks: undo whatever the partial apply changed.
	    restore_files $backup_dir $PATCH_DIR
	    if $SKIP_REVERSE; then
		# If the patch applies cleanly in reverse it is already in the
		# tree; mark it skipped instead of failing the run.
		patch -R -d $PATCH_DIR -p1 -E $fuzz --force --dry-run \
			< $PATCH > /dev/null 2>&1
		ST=$?
		if [ $ST -eq 0 ]; then
		    LAST_LOG="[ skipped: can be reverse-applied ]"
		    [ -n "$QUIET" ] && echo "$LAST_LOG"
		    STATUS=0
		    SKIPPED_PATCHES="$SKIPPED_PATCHES $PATCH"
		    PATCH="# $PATCH"
		    remove_rejects $backup_dir $PATCH_DIR
		fi
	    fi
	    # Backup directory is no longer needed
	    rm -rf $backup_dir
	else
	    if $QUILT; then
		echo "$PATCH" >> $PATCH_DIR/.pc/applied-patches
	    fi
	fi
	if ! $QUILT; then
	    rm -rf $PATCH_DIR/.pc/
	fi
	echo "$LAST_LOG" >> $PATCH_LOG
	[ -z "$QUIET" ] && echo "$LAST_LOG"
	if [ $STATUS -ne 0 ]; then
	    [ -n "$QUIET" ] && echo "$LAST_LOG"
	    echo "Patch $PATCH failed (rolled back)."
	    echo "Logfile: $PATCH_LOG"
	    status=1
	    break
	else
	    echo "$SERIES_PFX$PATCH" >> $PATCH_DIR/series
	fi
	shift
	if $QUILT; then
	    unset PATCHES[$n]
	fi
	let n++
	if $QUILT && test "$PATCH" = "$LIMIT"; then
	    break
	fi
    done
}
# Report the patches that were skipped (already upstream) so the maintainer
# can drop them from series.conf.  Prints nothing when none were skipped.
show_skipped() {
    [ -n "$SKIPPED_PATCHES" ] || return 0
    printf '%s\n' \
	"The following patches were skipped and can be removed from series.conf:"
    local entry
    for entry in $SKIPPED_PATCHES; do
	printf '%s\n' "$entry"
    done
}
# Allow to pass in default arguments via SEQUENCE_PATCH_ARGS.
set -- $SEQUENCE_PATCH_ARGS "$@"
if $have_arch_patches; then
arch_opt="arch:"
else
arch_opt=""
fi
options=`getopt -o qvd:F: --long quilt,no-quilt,$arch_opt,symbol:,dir:,combine,fast,vanilla,fuzz,patch-dir:,build-dir:,config:,kabi,ctags,cscope,no-xen,skip-reverse -- "$@"`
if [ $? -ne 0 ]
then
usage
fi
eval set -- "$options"
QUIET=1
EXTRA_SYMBOLS=
QUILT=true
FAST=
VANILLA=false
SP_BUILD_DIR=
CONFIG=
CONFIG_ARCH=
CONFIG_FLAVOR=
KABI=false
CTAGS=false
CSCOPE=false
SKIP_XEN=false
SKIP_REVERSE=false
while true; do
case "$1" in
-q)
QUIET=1
;;
-v)
QUIET=
;;
--quilt)
QUILT=true
;;
--no-quilt)
QUILT=false
;;
--combine)
# ignored
;;
--fast)
FAST=1
;;
--arch)
export PATCH_ARCH=$2
shift
;;
--symbol)
EXTRA_SYMBOLS="$EXTRA_SYMBOLS $2"
shift
;;
-d|--dir)
SCRATCH_AREA=$2
shift
;;
--vanilla)
VANILLA=true
;;
-F|--fuzz)
fuzz="-F$2"
shift
;;
--patch-dir)
PATCH_DIR=$2
shift
;;
--build-dir)
SP_BUILD_DIR="$2"
shift
;;
--config)
CONFIG="$2"
shift
;;
--kabi)
KABI=true
;;
--ctags)
CTAGS=true
;;
--cscope)
CSCOPE=true
;;
--no-xen)
SKIP_XEN=true
;;
--skip-reverse)
SKIP_REVERSE=true
;;
--)
shift
break ;;
*)
usage ;;
esac
shift
done
unset LIMIT
if [ $# -ge 1 ]; then
LIMIT=$1
shift
fi
if test -z "$CONFIG"; then
if test "$VANILLA_ONLY" = 1 || $VANILLA; then
CONFIG=$(uname -m)-vanilla
else
machine=$(uname -m)
case "$machine" in
i?86)
machine=i386
esac
if test -e "config/$machine/smp"; then
CONFIG=$machine-smp
elif test -e "config/$machine/pae"; then
CONFIG=$machine-pae
elif test -e "config/$machine/default"; then
CONFIG=$machine-default
elif test -e "config/$machine/rt"; then
CONFIG=$machine-rt
else
echo "Cannot determine default config for arch $machine"
fi
fi
fi
if test -n "$CONFIG"; then
CONFIG_ARCH=${CONFIG%%-*}
CONFIG_FLAVOR=${CONFIG##*-}
if [ "$CONFIG" = "$CONFIG_ARCH" -o "$CONFIG" = "$CONFIG_FLAVOR" -o \
-z "$CONFIG_ARCH" -o -z "$CONFIG_FLAVOR" ]; then
echo "Invalid config spec: --config=ARCH-FLAVOR is expected."
usage
fi
fi
if [ $# -ne 0 ]; then
usage
fi
if ! scripts/guards --prefix=config $(scripts/arch-symbols --list) < config.conf | \
egrep -q '/(xen|ec2|pv)$'; then
echo "*** Xen configs are disabled; Skipping Xen patches." >&2
SKIP_XEN=true
fi
# Some patches require patch 2.5.4. Abort with older versions.
PATCH_VERSION=$(patch -v | sed -e '/^patch/!d' -e 's/patch //')
case $PATCH_VERSION in
([01].*|2.[1-4].*|2.5.[1-3]) # (check if < 2.5.4)
echo "patch version $PATCH_VERSION found; " \
"a version >= 2.5.4 required." >&2
exit 1
;;
esac
# Check SCRATCH_AREA.
if [ -z "$SCRATCH_AREA" ]; then
echo "SCRATCH_AREA not defined (defaulting to \"tmp\")"
SCRATCH_AREA=tmp
fi
if [ ! -d "$SCRATCH_AREA" ]; then
if ! mkdir -p $SCRATCH_AREA; then
echo "creating scratch dir $SCRATCH_AREA failed"
exit 1
fi
fi
[ "${SCRATCH_AREA:0:1}" != "/" ] \
&& SCRATCH_AREA="$PWD/$SCRATCH_AREA"
TMPDIR=$SCRATCH_AREA
export TMPDIR
ORIG_DIR=$SCRATCH_AREA/linux-$SRCVERSION.orig
TAG=$(get_branch_name)
TAG=${TAG//\//_}
if $VANILLA; then
TAG=${TAG}-vanilla
fi
PATCH_LOG=$SCRATCH_AREA/patch-$SRCVERSION${TAG:+-$TAG}.log
# Check series.conf.
if [ ! -r series.conf ]; then
echo "Configuration file \`series.conf' not found"
exit 1
fi
if $have_arch_patches; then
if [ -z "$ARCH_SYMBOLS" ]; then
if [ -x ./arch-symbols ]; then
ARCH_SYMBOLS=./arch-symbols
elif [ -x scripts/arch-symbols ]; then
ARCH_SYMBOLS=scripts/arch-symbols
else
echo "Cannot locate \`arch-symbols' script (export ARCH_SYMBOLS)"
exit 1
fi
else
if [ ! -x "$ARCH_SYMBOLS" ]; then
echo "Cannot execute \`arch-symbols' script"
exit 1
fi
fi
SYMBOLS=$($ARCH_SYMBOLS)
if [ -z "$SYMBOLS" ]; then
echo "Unsupported architecture \`$ARCH'" >&2
exit 1
fi
echo "Architecture symbol(s): $SYMBOLS"
fi
if [ -s extra-symbols ]; then
EXTRA_SYMBOLS="$EXTRA_SYMBOLS $(cat extra-symbols)"
fi
if [ -n "$EXTRA_SYMBOLS" ]; then
EXTRA_SYMBOLS=${EXTRA_SYMBOLS# }
echo "Extra symbols: $EXTRA_SYMBOLS"
SYMBOLS="$SYMBOLS $EXTRA_SYMBOLS"
fi
EXT=${EXTRA_SYMBOLS// /-}
EXT=${EXT//\//}
if test -z "$PATCH_DIR"; then
PATCH_DIR=$SCRATCH_AREA/linux-$SRCVERSION${TAG:+-$TAG}${EXT:+-}$EXT
fi
if [ -n "$SP_BUILD_DIR" ]; then
# This allows alias (~) and variable expansion
SP_BUILD_DIR=$(eval echo "$SP_BUILD_DIR")
else
SP_BUILD_DIR="$PATCH_DIR"
fi
echo "Creating tree in $PATCH_DIR"
# Clean up from previous run
rm -f "$PATCH_LOG"
if [ -e $PATCH_DIR ]; then
echo "Cleaning up from previous run"
rm -rf $PATCH_DIR
fi
# Create fresh $SCRATCH_AREA/linux-$SRCVERSION.
if ! [ -d $ORIG_DIR ]; then
unpack_tarball "$SRCVERSION" "$ORIG_DIR"
find $ORIG_DIR -type f | xargs chmod a-w,a+r
fi
if $VANILLA; then
PATCHES=( $(scripts/guards $SYMBOLS < series.conf | egrep '^patches\.(kernel\.org|rpmify)/') )
else
PATCHES=( $(scripts/guards $SYMBOLS < series.conf) )
fi
# Check if patch $LIMIT exists
if [ -n "$LIMIT" ] || $SKIP_XEN; then
for ((n=0; n<${#PATCHES[@]}; n++)); do
if [ "$LIMIT" = - ]; then
LIMIT=${PATCHES[n]}
break
fi
case "${PATCHES[n]}" in
$LIMIT|*/$LIMIT)
LIMIT=${PATCHES[n]}
break
;;
patches.xen/*)
if $SKIP_XEN; then
LIMIT=${PATCHES[n-1]}
break
fi
;;
esac
done
if [ -n "$LIMIT" ] && ((n == ${#PATCHES[@]})); then
echo "No patch \`$LIMIT' found."
exit 1
fi
PATCHES_BEFORE=()
for ((m=0; m<n; m++)); do
PATCHES_BEFORE[m]=${PATCHES[m]}
done
PATCHES_AFTER=()
for ((m=n; m<${#PATCHES[@]}; m++)); do
PATCHES_AFTER[m-n]=${PATCHES[m]}
done
else
PATCHES_BEFORE=( "${PATCHES[@]}" )
PATCHES_AFTER=()
fi
# Helper function to restore files backed up by patch. This is
# faster than doing a --dry-run first.
# Helper function to restore files backed up by patch.  This is
# faster than doing a --dry-run first.
# $1 - backup directory written by `patch --backup --prefix=...`
# $2 - patched source tree to roll back
# Non-empty backups are copied back over the patched tree; empty backups mark
# files the patch *created*, so those are deleted from the tree instead.
restore_files() {
    local backup_dir=$1 patch_dir=$2 file
    local -a remove restore

    if [ -d $backup_dir ]; then
	pushd $backup_dir > /dev/null
	for file in $(find . -type f) ; do
	    if [ -s "$file" ]; then
		restore[${#restore[@]}]="$file"
	    else
		remove[${#remove[@]}]="$file"
	    fi
	done
	#echo "Restore: ${restore[@]}"
	# --parents recreates each file's relative path under $patch_dir.
	[ ${#restore[@]} -ne 0 ] \
	    && printf "%s\n" "${restore[@]}" \
		| xargs cp -f --parents --target $patch_dir
	cd $patch_dir
	#echo "Remove: ${remove[@]}"
	[ ${#remove[@]} -ne 0 ] \
	    && printf "%s\n" "${remove[@]}" | xargs rm -f
	popd > /dev/null
    fi
}
# Helper function to remove stray .rej files.
# Delete the stray .rej files that correspond to files saved in the backup
# directory; used after deciding a failed patch can be skipped because it is
# already applied upstream.
# $1 - backup directory written by `patch --backup --prefix=...`
# $2 - patched source tree to clean up
remove_rejects() {
    local backup_dir=$1 patch_dir=$2 entry
    local -a stale=()

    if [ -d $backup_dir ]; then
	pushd $backup_dir > /dev/null
	for entry in $(find . -type f) ; do
	    if [ -f "$patch_dir/$entry.rej" ]; then
		stale+=("$entry.rej")
	    fi
	done
	cd $patch_dir
	#echo "Remove rejects: ${stale[@]}"
	[ ${#stale[@]} -ne 0 ] \
	    && printf "%s\n" "${stale[@]}" | xargs rm -f
	popd > /dev/null
    fi
}
# Create hardlinked source tree
echo "Linking from $ORIG_DIR"
cp -rld $ORIG_DIR $PATCH_DIR
# create a relative symlink
ln -snf ${PATCH_DIR#$SCRATCH_AREA/} $SCRATCH_AREA/current
echo -e "# Symbols: $SYMBOLS\n#" > $PATCH_DIR/series
SERIES_PFX=
if ! $QUILT; then
SERIES_PFX="# "
fi
mkdir $PATCH_DIR/.pc
echo 2 > $PATCH_DIR/.pc/.version
if [ -z "$FAST" ]; then
apply_patches
else
apply_fast_patches
fi
if [ -n "$EXTRA_SYMBOLS" ]; then
echo "$EXTRA_SYMBOLS" > $PATCH_DIR/extra-symbols
fi
if ! $QUILT; then
rm $PATCH_DIR/series
fi
ln -s $PWD $PATCH_DIR/patches
ln -s patches/scripts/{refresh_patch,run_oldconfig}.sh $PATCH_DIR/
if $VANILLA; then
touch "$PATCH_DIR/.is_vanilla"
fi
if $QUILT; then
[ -r $HOME/.quiltrc ] && . $HOME/.quiltrc
[ ${QUILT_PATCHES-patches} != patches ] \
&& ln -s $PWD $PATCH_DIR/${QUILT_PATCHES-patches}
fi
echo "[ Tree: $PATCH_DIR ]"
if test "$SP_BUILD_DIR" != "$PATCH_DIR"; then
mkdir -p "$SP_BUILD_DIR"
echo "[ Build Dir: $SP_BUILD_DIR ]"
rm -f "$SP_BUILD_DIR/source"
rm -f "$SP_BUILD_DIR/patches"
ln -sf "$PATCH_DIR" "$SP_BUILD_DIR/source"
ln -sf "source/patches" "$SP_BUILD_DIR/patches"
fi
# If there are any remaining patches, add them to the series so
# they can be fixed up with quilt (or similar).
if [ -n "${PATCHES[*]}" ]; then
( IFS=$'\n' ; echo "${PATCHES[*]}" ) >> $PATCH_DIR/series
fi
show_skipped
if test "0$status" -ne 0; then
exit $status
fi
if test -e supported.conf; then
echo "[ Generating Module.supported ]"
scripts/guards base external < supported.conf > "$SP_BUILD_DIR/Module.supported"
fi
if test -n "$CONFIG"; then
if test -e "config/$CONFIG_ARCH/$CONFIG_FLAVOR"; then
echo "[ Copying config/$CONFIG_ARCH/$CONFIG_FLAVOR ]"
cp -a "config/$CONFIG_ARCH/$CONFIG_FLAVOR" "$SP_BUILD_DIR/.config"
else
echo "[ Config $CONFIG does not exist. ]"
fi
if $KABI; then
if [ ! -x rpm/modversions ]; then
echo "[ This branch does not support the modversions kABI mechanism. Skipping. ]"
elif [ -e "kabi/$CONFIG_ARCH/symtypes-$CONFIG_FLAVOR" ]; then
echo "[ Expanding kABI references for $CONFIG ]"
rpm/modversions --unpack "$SP_BUILD_DIR" < \
"kabi/$CONFIG_ARCH/symtypes-$CONFIG_FLAVOR"
else
echo "[ No kABI references for $CONFIG ]"
fi
fi
fi
# Some archs we use for the config do not exist or have a different name in the
# kernel source tree
case $CONFIG_ARCH in
s390x) TAGS_ARCH=s390 ;;
ppc64|ppc64le) TAGS_ARCH=powerpc ;;
*) TAGS_ARCH=$CONFIG_ARCH ;;
esac
if $CTAGS; then
if ctags --version > /dev/null; then
echo "[ Generating ctags (this may take a while)]"
ARCH=$TAGS_ARCH make -s --no-print-directory -C "$PATCH_DIR" O="$SP_BUILD_DIR" tags
else
echo "[ Could not generate ctags: ctags not found ]"
fi
fi
if $CSCOPE; then
if cscope -V 2> /dev/null; then
echo "[ Generating cscope db (this may take a while)]"
ARCH=$TAGS_ARCH make -s --no-print-directory -C "$PATCH_DIR" O="$SP_BUILD_DIR" cscope
else
echo "[ Could not generate cscope db: cscope not found ]"
fi
fi
|
marxin/kernel-source
|
scripts/sequence-patch.sh
|
Shell
|
gpl-2.0
| 16,417 |
#! /usr/bin/env bash
#
# Copyright (C) 2013-2015 Zhang Rui <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# IJK_FFMPEG_UPSTREAM=git://git.videolan.org/ffmpeg.git
IJK_FFMPEG_UPSTREAM=https://github.com/Bilibili/FFmpeg.git
IJK_FFMPEG_FORK=https://github.com/Bilibili/FFmpeg.git
IJK_FFMPEG_COMMIT=ff3.1--ijk0.6.2--20160926--001
IJK_FFMPEG_LOCAL_REPO=extra/ffmpeg
IJK_GASP_UPSTREAM=https://github.com/Bilibili/gas-preprocessor.git
# gas-preprocessor backup
# https://github.com/Bilibili/gas-preprocessor.git
if [ "$IJK_FFMPEG_REPO_URL" != "" ]; then
IJK_FFMPEG_UPSTREAM=$IJK_FFMPEG_REPO_URL
IJK_FFMPEG_FORK=$IJK_FFMPEG_REPO_URL
fi
if [ "$IJK_GASP_REPO_URL" != "" ]; then
IJK_GASP_UPSTREAM=$IJK_GASP_REPO_URL
fi
set -e
TOOLS=tools
FF_ALL_ARCHS_IOS6_SDK="armv7 armv7s i386"
FF_ALL_ARCHS_IOS7_SDK="armv7 armv7s arm64 i386 x86_64"
FF_ALL_ARCHS_IOS8_SDK="armv7 arm64 i386 x86_64"
FF_ALL_ARCHS=$FF_ALL_ARCHS_IOS8_SDK
FF_TARGET=$1
# Print the pinned ffmpeg commit/tag so callers can query the expected version.
echo_ffmpeg_version() {
    printf '%s\n' "$IJK_FFMPEG_COMMIT"
}
# Fetch the shared base repositories (gas-preprocessor and the ffmpeg
# upstream mirror) into their local checkout directories via the helper
# script in $TOOLS.
function pull_common() {
    git --version
    echo "== pull gas-preprocessor base =="
    sh $TOOLS/pull-repo-base.sh $IJK_GASP_UPSTREAM extra/gas-preprocessor
    echo "== pull ffmpeg base =="
    sh $TOOLS/pull-repo-base.sh $IJK_FFMPEG_UPSTREAM $IJK_FFMPEG_LOCAL_REPO
}
# Clone/update the per-architecture ffmpeg fork (backed by the shared local
# mirror) and check out the pinned commit on an "ijkplayer" branch.
# $1 - target architecture (e.g. armv7, arm64, x86_64)
function pull_fork() {
    echo "== pull ffmpeg fork $1 =="
    sh $TOOLS/pull-repo-ref.sh $IJK_FFMPEG_FORK ios/ffmpeg-$1 ${IJK_FFMPEG_LOCAL_REPO}
    cd ios/ffmpeg-$1
    git checkout ${IJK_FFMPEG_COMMIT} -B ijkplayer
    # `cd -` returns to the previous directory (and echoes it).
    cd -
}
# Check out the ffmpeg fork for every architecture listed in $FF_ALL_ARCHS.
function pull_fork_all() {
    for ARCH in $FF_ALL_ARCHS
    do
        pull_fork $ARCH
    done
}
#----------
# Dispatch on the requested target: print the pinned version, pull a single
# architecture, or (default) pull every architecture.
case "$FF_TARGET" in
    ffmpeg-version)
        echo_ffmpeg_version
    ;;
    armv7|armv7s|arm64|i386|x86_64)
        pull_common
        pull_fork $FF_TARGET
    ;;
    all|*)
        pull_common
        pull_fork_all
    ;;
esac
|
307509256/ijkplayer-plus
|
init-ios.sh
|
Shell
|
gpl-2.0
| 2,378 |
#!/bin/bash
# Periodically capture `mysqladmin extended-status` output into a log file.
#
# Arguments:
#   $1 - total time to run, in seconds
#   $2 - wait between samples, in seconds
#   $3 - mysql user
#   $4 - mysql socket path
#   $5 - output log file
# Requires $DB_DIR to point at the MySQL installation directory.
# Fixed: all expansions are quoted so paths/names with spaces work.

# time to run in seconds
RUN_TIME_SECONDS=$1
# wait between checks
WAIT_TIME_SECONDS=$2
MYSQL_USER=$3
MYSQL_PASSWORD=""
MYSQL_SOCKET=$4
LOG_NAME=$5

# kill existing log file if it exists
rm -f "$LOG_NAME"

while [ "$RUN_TIME_SECONDS" -gt 0 ]; do
    # Grouped redirect writes the timestamp banner in one append.
    {
        echo "******************************"
        date
        echo "******************************"
    } >> "$LOG_NAME"
    "$DB_DIR"/bin/mysqladmin --user="$MYSQL_USER" --password="$MYSQL_PASSWORD" --socket="$MYSQL_SOCKET" extended-status >> "$LOG_NAME"
    RUN_TIME_SECONDS=$(( RUN_TIME_SECONDS - WAIT_TIME_SECONDS ))
    sleep "$WAIT_TIME_SECONDS"
done
|
Percona-QA/toku-qa
|
bin/capture-extended-status.bash
|
Shell
|
gpl-2.0
| 613 |
#!/bin/bash
# Build and install the TBS tuner drivers for 32-bit x86 on Linux 3.x kernels.
make distclean
echo "TBS drivers set for x86 Linux 3.x"
# Select the x86 / Linux 3.x configuration (DVB-C variant left disabled).
./v4l/tbs-x86_r3.sh
#./v4l/tbs-dvbc-x86_r3.sh
# Enable some staging drivers
make stagingconfig
echo "TBS drivers building..."
make -j2
echo "TBS drivers installing..."
# Drop previously installed out-of-tree modules before installing fresh ones.
sudo rm -r -f /lib/modules/$(uname -r)/extra
sudo make install
echo "TBS drivers installation done"
echo "You need to reboot..."
|
work40/linux-tbs-drivers
|
tbs_install_lnx3x_x86.sh
|
Shell
|
gpl-2.0
| 371 |
#!/bin/bash
# Append a static eth0 configuration to the target rootfs' interfaces file,
# but only when no eth0 entry is present yet (keeps the script idempotent).
INTERFACES_FILE="${TARGET_DIR}/etc/network/interfaces"
SEARCH_CRITERIA="eth0"
NONE_FOUND="0"

RESULT_FROM_COMPARISON=$(grep -c "$SEARCH_CRITERIA" "$INTERFACES_FILE")
echo "$RESULT_FROM_COMPARISON"

if [ "$RESULT_FROM_COMPARISON" = "$NONE_FOUND" ]; then
	# Single heredoc append instead of four separate echo redirects.
	cat >> "$INTERFACES_FILE" <<EOF
auto eth0
iface eth0 inet static
address 192.168.1.2
netmask 255.255.128.0
EOF
fi
|
linux4hach/buildroot-at91
|
support/scripts/setNetwork.sh
|
Shell
|
gpl-2.0
| 466 |
#!/bin/env bash
THIS=`basename $0 '.sh'`
DATE=`date +%F`
FILE="$THIS-$HOSTNAME-$DATE-all"
OLD_PKG="old-packages"
NEW_PKG="new-packages"
OLD_PKG_NAME="$OLD_PKG-name"
NEW_PKG_NAME="$NEW_PKG-name"
NEW_PKG_NAME_INSTALL="$NEW_PKG_NAME-install"
# Snapshot the installed-package list (name, version-release, arch) in
# fixed-width columns, echoed to the terminal and saved to
# "$FILE-$OLD_PKG.txt" via tee.  Runs in a subshell so the IFS games do not
# leak into the caller.
_pre_install() {
	(
	IFS=$'\n'
	# Split rpm output on newlines only, so each record stays one word.
	for package in `rpm -qa --qf '%{NAME} %{VERSION}-%{RELEASE} %{ARCH}\n' | sort`; do
		IFS=$' '
		echo "$package" | while read name version release; do
			printf "%-40s %-40s %-10s\n" "$name" "$version" "$release"
		done
	done
	) 2>&1 | tee "$FILE-$OLD_PKG.txt"
}
# Compare the pre-install snapshot with the currently installed package set:
# writes sorted name lists for both, then saves names present before but
# missing now to "$FILE-$NEW_PKG_NAME_INSTALL.txt".
# NOTE(review): `grep '^-'` also matches diff's "---" header line — confirm
# whether that is intended to end up in the install list.
_post_install() {
	cat "$FILE-$OLD_PKG.txt" | cut -d' ' -f1 | grep 'release$'
	cat "$FILE-$OLD_PKG.txt" | cut -d' ' -f1 | sort | uniq > "$FILE-$OLD_PKG_NAME.txt"
	rpm -qa --qf '%{NAME}\n' | sort | uniq > "$FILE-$NEW_PKG_NAME.txt"
	diff -u "$FILE-$OLD_PKG_NAME.txt" "$FILE-$NEW_PKG_NAME.txt" | grep '^-' | sed 's/^-//' > "$FILE-$NEW_PKG_NAME_INSTALL.txt"
}
case "$1" in
pre-install ) _pre_install ;;
post-install ) _post_install ;;
* ) echo "Usage: $THIS pre-install || post-install" ;;
esac
exit
|
zappyk-github/zappyk-ocean
|
root/bin/upgrade-rpm-query.sh
|
Shell
|
gpl-2.0
| 1,134 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2008-2018 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
. ./tup.sh

# Top-level Tupfile: compile each .c file, then link all objects plus the
# fs subdirectory's combined object into prog.exe.
cat > Tupfile << HERE
: foreach *.c |> gcc -c %f -o %o |> %B.o
: *.o fs/built-in.o |> gcc %f -o %o |> prog.exe
HERE
tmkdir fs
# Subdirectory Tupfile: compile each .c file and merge the objects with a
# relocatable link (ld -r) into built-in.o, Linux-kernel style.
cat > fs/Tupfile << HERE
: foreach *.c |> gcc -c %f -o %o |> %B.o
: *.o |> ld -r %f -o built-in.o |> built-in.o
HERE
echo "int main(void) {return 0;}" > main.c
echo "void ext3fs(void) {}" > fs/ext3.c
echo "void ext4fs(void) {}" > fs/ext4.c
tup touch Tupfile main.c fs/ext3.c fs/ext4.c fs/Tupfile
update

# The final binary must contain symbols from both directories.
sym_check prog.exe main ext3fs ext4fs

eotup
|
jonatanolofsson/tup
|
test/t4004-link-subdir.sh
|
Shell
|
gpl-2.0
| 1,239 |
# test suite that always succeeds - for testing the framework itself
run()
{
	# run_testcase is provided by the surrounding test harness.
	run_testcase true
	return 0
}
|
joyent/libfaketime
|
test/functests/test_true.sh
|
Shell
|
gpl-2.0
| 97 |
#!/bin/bash -ue
skip=true
NUMC=${NUMC:-3}
SDURATION=${SDURATION:-300}
TSIZE=${TSIZE:-1000}
NUMT=${NUMT:-16}
STEST=${STEST:-oltp}
AUTOINC=${AUTOINC:-off}
TCOUNT=${TCOUNT:-10}
SHUTDN=${SHUTDN:-yes}
TXRATE=${TXRATE:-120}
RSLEEP=${RSLEEP:-10}
RMOVE=${RMOVE:-1}
LOSS="${LOSS:-1%}"
DELAY="${DELAY:-3ms}"
CMD=${CMD:-"/pxc/bin/mysqld --defaults-extra-file=/pxc/my.cnf --basedir=/pxc --user=mysql --skip-grant-tables --innodb-buffer-pool-size=500M --innodb-log-file-size=100M --query_cache_type=0 --wsrep_slave_threads=16 --innodb_autoinc_lock_mode=2 --query_cache_size=0 --innodb_flush_log_at_trx_commit=0 --innodb_file_per_table "}
LPATH=${SPATH:-/usr/share/doc/sysbench/tests/db}
thres=1
RANDOM=$$
BUILD_NUMBER=${BUILD_NUMBER:-$RANDOM}
SLEEPCNT=${SLEEPCNT:-10}
FSYNC=${FSYNC:-0}
TMPD=${TMPDIR:-/tmp}
ALLINT=${ALLINT:-1}
COREDIR=${COREDIR:-/var/crash}
ECMD=${EXTRA_CMD:-" --wsrep-sst-method=rsync --core-file "}
RSEGMENT=${RSEGMENT:-1}
LOSSNO=${LOSSNO:-1}
PROVIDER=${EPROVIDER:-0}
HOSTSF="$PWD/hosts"
EXCL=${EXCL:-1}
VSYNC=${VSYNC:-1}
CATAL=${COREONFATAL:-0}
if [[ ${BDEBUG:-0} -eq 1 ]];then
set -x
fi
SOCKS=""
SOCKPATH="/tmp/pxc-socks"
SDIR="$LPATH"
export PATH="/usr/sbin:$PATH"
linter="eth0"
FORCE_FTWRL=${FORCE_FTWRL:-0}
FIRSTD=$(cut -d" " -f1 <<< $DELAY | tr -d 'ms')
RESTD=$(cut -d" " -f2- <<< $DELAY)
echo "
[sst]
sst-initial-timeout=$(( 50*NUMC ))
" > /tmp/my.cnf
if [[ $NUMC -lt 3 ]];then
echo "Specify at least 3 for nodes"
exit 1
fi
# Hack for jenkins only. uh..
if [[ -n ${BUILD_NUMBER:-} && $(groups) != *docker* ]]; then
exec sg docker "$0 $*"
fi
if [[ $PROVIDER == '1' ]];then
CMD+=" --wsrep-provider=/pxc/libgalera_smm.so"
PGALERA=" -v $PWD/libgalera_smm.so:/pxc/libgalera_smm.so -v /tmp/my.cnf:/pxc/my.cnf"
#cp -v $PWD/libgalera_smm.so /pxc/
else
PGALERA="-v /tmp/my.cnf:/pxc/my.cnf"
fi
pushd ../docker-tarball
count=$(ls -1ct Percona-XtraDB-Cluster-*.tar.gz | wc -l)
if [[ $count -eq 0 ]];then
echo "FATAL: Need tar.gz"
exit 2
fi
if [[ $count -gt $thres ]];then
for fl in `ls -1ct Percona-XtraDB-Cluster-*.tar.gz | tail -n +2`;do
rm -f $fl || true
done
fi
find . -maxdepth 1 -type d -name 'Percona-XtraDB-Cluster-*' -exec rm -rf {} \+ || true
TAR=`ls -1ct Percona-XtraDB-Cluster-*.tar.gz | head -n1`
BASE="$(tar tf $TAR | head -1 | tr -d '/')"
tar -xf $TAR
rm -rf Percona-XtraDB-Cluster || true
mv $BASE Percona-XtraDB-Cluster
NBASE=$PWD/Percona-XtraDB-Cluster
MD5=$(md5sum < $TAR | cut -d" " -f1)
if [[ ! -e $TMPD/MD5FILE ]];then
skip=false
echo -n $MD5 > $TMPD/MD5FILE
else
EMD5=$(cat $TMPD/MD5FILE)
if [[ $MD5 != $EMD5 ]];then
echo -n $MD5 > $TMPD/MD5FILE
skip=false
else
skip=true
fi
fi
popd
#if git log --summary -1 -p | grep -q '/Dockerfile';then
#skip=false
#fi
# Allow callers to force a rebuild even when the tarball checksum matched.
# NOTE(review): the script runs under "bash -ue"; FORCEBLD had no default,
# so leaving it unset aborted the whole run here. Default it to 0.
if [[ ${FORCEBLD:-0} == 1 ]];then
    skip=false
fi
LOGDIR="$TMPD/logs/$BUILD_NUMBER"
mkdir -p $LOGDIR
# Run a command once for each of the 50 possible container names
# (Dock1 .. Dock50), regardless of how many are actually running.
# $1 - command string; "DockN" is appended as its final argument.
runum(){
    local action="$1"
    local idx
    for (( idx=1; idx<=50; idx++ )); do
        eval "$action Dock${idx}"
    done
}
# Run a mysql-family client against a container's published 3306 port.
# Arguments: $1 - container name
#            $2 - client binary (mysql, mysqladmin, ...)
#            $3 - SQL statement / client subcommand
# For "mysql" the statement must be passed via -e; other clients
# (mysqladmin) take it positionally, so $ecmd stays empty and is
# deliberately left unquoted below so it expands to nothing.
runc(){
    local cont=$1
    shift
    local cmd1=$1
    shift
    local cmd2=$1
    local ecmd
    if [[ $cmd1 == 'mysql' ]];then
        ecmd=-e
    else
        ecmd=""
    fi
    # Map container name -> HOST:PORT of the published mysql port.
    local hostt=$(docker port $cont 3306)
    local hostr=$(cut -d: -f1 <<< $hostt)
    local portr=$(cut -d: -f2 <<< $hostt)
    $cmd1 -h $hostr -P $portr -u root $ecmd "$cmd2"
}
# Collect logs and core dumps, tear down the containers (when SHUTDN=yes),
# archive everything, and finally kill our own process group to reap the
# background socat relays. Installed as the EXIT trap.
# Globals read: NUMC, LOGDIR, SHUTDN, SOCKPATH, COREDIR, NBASE, TMPD,
# BUILD_NUMBER.
cleanup(){
    local cnt
    set +e
    for s in `seq 1 $NUMC`;do
        docker logs -t Dock$s &>$LOGDIR/Dock$s.log
    done
    docker logs -t dnscluster > $LOGDIR/dnscluster.log
    if [[ $SHUTDN == 'yes' ]];then
        docker stop dnscluster &>/dev/null
        docker rm -f dnscluster &>/dev/null
        echo "Stopping docker containers"
        runum "docker stop" &>/dev/null
        echo "Removing containers"
        runum "docker rm -f " &>/dev/null
    fi
    pkill -9 -f socat
    rm -rf $SOCKPATH && mkdir -p $SOCKPATH
    #rm -rf $LOGDIR
    now=$(date +%s)
    for s in `seq 1 $NUMC`;do
        # NOTE(review): "then" is never assigned anywhere in this script, so
        # $(( then-now )) evaluates with then=0 and passes a negative number
        # to --since -- confirm what journalctl actually does with it.
        sudo journalctl --since=$(( then-now )) | grep "Dock${s}-" > $LOGDIR/journald-Dock${s}.log
    done
    sudo journalctl -b > $LOGDIR/journald-all.log
    tar cvzf $TMPD/results-${BUILD_NUMBER}.tar.gz $LOGDIR
    set -e
    echo "Checking for core files"
    if [[ "$(ls -A $COREDIR)" ]];then
        echo "Core files found"
        for cor in $COREDIR/*.core;do
            # Derive the backtrace file name from the core file name.
            cnt=$(cut -d. -f1 <<< $cor)
            sudo gdb $NBASE/bin/mysqld --quiet --batch --core=$cor -ex "set logging file $LOGDIR/$cnt.trace" --command=../backtrace.gdb
        done
    fi
    # Kill our whole process group (socat relays, stray children).
    pgid=$(ps -o pgid= $$ | grep -o '[0-9]*')
    kill -TERM -$pgid || true
}
# Shut every cluster node down cleanly via "mysqladmin shutdown".
# On any failure, record the node name and set SHUTDN=no so cleanup()
# keeps the containers around for post-mortem inspection.
# Globals: NUMC (read), SHUTDN (may be set to 'no').
# NOTE(review): the original appended failures to "$failed" but tested
# "$faildown", so the failure branch was unreachable; unified on faildown.
mshutdown(){
    local faildown=""
    local s
    echo "Shutting down servers"
    for s in `seq 1 $NUMC`;do
        echo "Shutting down container Dock${s}"
        runc Dock$s mysqladmin shutdown || faildown+=" Dock${s}"
    done
    if [[ -n $faildown ]];then
        echo "Failed in shutdown: $faildown"
        SHUTDN='no'
    fi
}
# Best-effort teardown of leftovers from a previous run: containers, the
# dnsmasq container, stray socat/mysqld processes, and the socket dir.
# Errors are ignored (set +e) because none of these may exist yet.
preclean(){
    set +e
    echo "Stopping old docker containers"
    runum "docker stop" &>/dev/null
    echo "Removing old containers"
    runum "docker rm -f" &>/dev/null
    docker stop dnscluster &>/dev/null
    docker rm -f dnscluster &>/dev/null
    pkill -9 -f socat
    pkill -9 -f mysqld
    rm -rf $SOCKPATH && mkdir -p $SOCKPATH
    set -e
}
# Poll a container's published 3306 port with "mysqladmin ping" until the
# server answers, sleeping 5s between attempts; abort the whole script
# after more than SLEEPCNT attempts. On success SLEEPCNT is grown by the
# attempts used, giving later (slower-joining) nodes a larger budget.
# Arguments: $1 - container name.  Globals: SLEEPCNT (read/updated).
wait_for_up(){
    local cnt=$1
    local count=0
    # "docker port" prints HOST:PORT for the mapped 3306 port.
    local hostt=$(docker port $cnt 3306)
    local hostr=$(cut -d: -f1 <<< $hostt)
    local portr=$(cut -d: -f2 <<< $hostt)
    set +e
    while ! mysqladmin -h $hostr -P $portr -u root ping &>/dev/null;do
        echo "Waiting for $cnt"
        sleep 5
        if [[ $count -gt $SLEEPCNT ]];then
            echo "Failure"
            exit 1
        else
            count=$(( count+1 ))
        fi
    done
    echo "$cnt container up and running!"
    SLEEPCNT=$(( SLEEPCNT+count ))
    set -e
}
# Start a background socat relay exposing a container's mapped 3306 port
# as a local UNIX socket ($SOCKPATH/<name>.sock), and append that socket
# to the comma-separated $SOCKS list later handed to sysbench.
# Arguments: $1 - container name.  Globals: SOCKPATH (read), SOCKS (updated).
# NOTE(review): hostt/hostr/portr are not declared local and leak as
# globals -- looks harmless here, but verify nothing reads them later.
spawn_sock(){
    local cnt=$1
    hostt=$(docker port $cnt 3306)
    hostr=$(cut -d: -f1 <<< $hostt)
    portr=$(cut -d: -f2 <<< $hostt)
    local socket=$SOCKPATH/${cnt}.sock
    socat UNIX-LISTEN:${socket},fork,reuseaddr TCP:$hostr:$portr &
    echo "$cnt also listening on $socket for $hostr:$portr"
    if [[ -z $SOCKS ]];then
        SOCKS="$socket"
    else
        SOCKS+=",$socket"
    fi
}
# belongs ELEM ITEM...
# Return 0 if ELEM equals one of the remaining arguments, 1 otherwise.
# NOTE(review): the original "local -a arr=$@" collapsed all items into a
# single array element and relied on unquoted re-splitting (breaking on
# items containing spaces or glob characters), and leaked "x" as a global.
belongs(){
    local elem=$1
    shift
    local x
    for x in "$@";do
        # Quoted RHS: literal comparison, not glob-pattern matching.
        if [[ $elem == "$x" ]];then
            return 0
        fi
    done
    return 1
}
trap cleanup EXIT KILL
preclean
if [[ $skip == "false" ]];then
pushd ../docker-tarball
docker build --rm -t ronin/pxc:tarball -f Dockerfile.centos7-64 . 2>&1 | tee $LOGDIR/Dock-pxc.log
popd
# Required for core-dump analysis
# rm -rf Percona-XtraDB-Cluster || true
fi
CSTR="gcomm://Dock1"
#for nd in `seq 2 $NUMC`;do
#CSTR="${CSTR},Dock${nd}"
#done
rm -f $HOSTSF && touch $HOSTSF
# Some Selinux foo
chcon -Rt svirt_sandbox_file_t $HOSTSF &>/dev/null || true
chcon -Rt svirt_sandbox_file_t $COREDIR &>/dev/null || true
#docker run -d -i -v $HOSTSF:/dnsmasq.hosts --name dnscluster ronin/dnsmasq &>$LOGDIR/dnscluster-run.log
docker run -d -i -v /dev/log:/dev/log -e SST_SYSLOG_TAG=dnsmasq -v $HOSTSF:/dnsmasq.hosts --name dnscluster ronin/dnsmasq bash -c "dnsmasq -8 /dev/null --dhcp-hostsfile=/dnsmasq.res --dhcp-range=172.17.0.1,172.17.0.253 -H /dnsmasq.hosts && while true;do sleep 1; pkill -HUP dnsmasq;done"
dnsi=$(docker inspect dnscluster | grep IPAddress | grep -oE '[0-9\.]+')
echo "Starting first node"
declare -a segloss
if [[ $RSEGMENT == 1 ]];then
SEGMENT=$(( RANDOM % (NUMC/2) ))
segloss[0]=$(( SEGMENT/2+1 ))
else
SEGMENT=0
fi
if [[ $FSYNC == '0' || $VSYNC == '1' ]];then
PRELOAD="/usr/lib64/libeatmydata.so"
else
PRELOAD=""
fi
docker run -P -e LD_PRELOAD=$PRELOAD -e FORCE_FTWRL=$FORCE_FTWRL -d -t -i -h Dock1 -v $COREDIR:/pxc/crash $PGALERA --dns $dnsi --name Dock1 ronin/pxc:tarball bash -c "ulimit -c unlimited && chmod 777 /pxc/crash && $CMD $ECMD --wsrep-new-cluster --wsrep-provider-options='gmcast.segment=$SEGMENT; evs.auto_evict=3; evs.version=1; evs.info_log_mask=0x3'" &>$LOGDIR/run-Dock1.log
wait_for_up Dock1
spawn_sock Dock1
FIRSTSOCK="$SOCKPATH/Dock1.sock"
firsti=$(docker inspect Dock1 | grep IPAddress | grep -oE '[0-9\.]+')
echo "$firsti Dock1" >> $HOSTSF
echo "$firsti Dock1.ci.percona.com" >> $HOSTSF
echo "$firsti meant for Dock1"
set -x
# Populate test.sbtest1..sbtestN through the first node only.
# NOTE(review): fixed "---report-interval" (triple dash) -> "--report-interval".
sysbench --test=$LPATH/parallel_prepare.lua --report-interval=10 --oltp-auto-inc=$AUTOINC --mysql-db=test --db-driver=mysql --num-threads=$NUMT --mysql-engine-trx=yes --mysql-table-engine=innodb --mysql-socket=$FIRSTSOCK --mysql-user=root --oltp-table-size=$TSIZE --oltp_tables_count=$TCOUNT prepare 2>&1 | tee $LOGDIR/sysbench_prepare.txt
set +x
mysql -S $FIRSTSOCK -u root -e "create database testdb;" || true
nexti=$firsti
sleep 5
RANDOM=$(date +%s)
for rest in `seq 2 $NUMC`; do
echo "Starting node#$rest"
lasto=$(cut -d. -f4 <<< $nexti)
nexti=$(cut -d. -f1-3 <<< $nexti).$(( lasto+1 ))
echo "$nexti Dock${rest}" >> $HOSTSF
echo "$nexti Dock${rest}.ci.percona.com" >> $HOSTSF
echo "$nexti meant for Dock${rest}"
if [[ $RSEGMENT == "1" ]];then
SEGMENT=$(( RANDOM % (NUMC/2) ))
segloss[$(( rest-1 ))]=$(( SEGMENT/2+1 ))
else
SEGMENT=0
fi
if [[ $FSYNC == '0' || ( $VSYNC == '1' && $(( RANDOM%2 )) == 0 ) ]];then
PRELOAD="/usr/lib64/libeatmydata.so"
else
PRELOAD=""
fi
set -x
docker run -P -e LD_PRELOAD=$PRELOAD -e FORCE_FTWRL=$FORCE_FTWRL -d -t -i -h Dock$rest -v $COREDIR:/pxc/crash $PGALERA --dns $dnsi --name Dock$rest ronin/pxc:tarball bash -c "ulimit -c unlimited && chmod 777 /pxc/crash && $CMD $ECMD --wsrep_cluster_address=$CSTR --wsrep_node_name=Dock$rest --wsrep-provider-options='gmcast.segment=$SEGMENT; evs.auto_evict=3; evs.version=1; evs.info_log_mask=0x3'" &>$LOGDIR/run-Dock${rest}.log
set +x
#CSTR="${CSTR},Dock${rest}"
if [[ $(docker inspect Dock$rest | grep IPAddress | grep -oE '[0-9\.]+') != $nexti ]];then
echo "Assertion failed $nexti, $(docker inspect Dock$rest | grep IPAddress | grep -oE '[0-9\.]+') "
exit 1
fi
sleep $(( rest*2 ))
done
echo "Waiting for all servers"
for s in `seq 2 $NUMC`;do
wait_for_up Dock$s
spawn_sock Dock$s
done
# Will be needed for LOSS-WITH-SST
#int1=$(brctl show docker0 | tail -n +2 | grep -oE 'veth[a-z0-9]+' | head -1)
#sudo tc qdisc add dev $int1 root netem delay $DELAY loss $LOSS
#sysbench --test=$LPATH/parallel_prepare.lua ---report-interval=10 --oltp-auto-inc=$AUTOINC --mysql-db=test --db-driver=mysql --num-threads=$NUMT --mysql-engine-trx=yes --mysql-table-engine=innodb --mysql-socket=$SOCKS --mysql-user=root --oltp-table-size=$TSIZE --oltp_tables_count=$TCOUNT prepare 2>&1 | tee $LOGDIR/sysbench_prepare.txt
#sysbench --test=$LPATH/oltp.lua ---report-interval=10 --oltp-auto-inc=$AUTOINC --mysql-db=test --db-driver=mysql --num-threads=$NUMT --mysql-engine-trx=yes --mysql-table-engine=innodb --mysql-socket=$SOCKS --mysql-user=root --oltp-table-size=$TSIZE --oltp_tables_count=$TCOUNT prepare 2>&1 | tee $LOGDIR/sysbench_prepare.txt
#echo "Interfaces"
#ip addr
sleep 10
totsleep=10
echo "Pre-Sanity tests"
runagain=0
while true; do
runagain=0
for s in `seq 1 $NUMC`;do
stat1=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_cluster_status'" 2>/dev/null | tail -1)
stat2=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_state_comment'" 2>/dev/null | tail -1)
stat3=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_recv_queue'" 2>/dev/null | tail -1)
stat4=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_send_queue'" 2>/dev/null | tail -1)
stat5=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_evs_delayed'" 2>/dev/null | tail -1)
if [[ $stat1 != 'Primary' || $stat2 != 'Synced' || $stat3 != '0' || $stat4 != '0' ]];then
echo "Waiting for Dock${s} (or some other node to empty) to become really synced or primary: $stat1, $stat2, $stat3, $stat4, $stat5"
runagain=1
break
else
echo "Dock${s} is synced and is primary: $stat1, $stat2, $stat3, $stat4, $stat5"
fi
done
if [[ $runagain -eq 1 ]];then
sleep 10
totsleep=$(( totsleep+10 ))
continue
else
break
fi
done
echo "Slept for $totsleep in total"
for s in `seq 1 $NUMC`;do
for x in `seq 1 $TCOUNT`;do
if ! mysql -S $SOCKPATH/Dock${s}.sock -u root -e "select count(*) from test.sbtest$x" &>>$LOGDIR/sanity-pre.log;then
echo "FATAL: Failed in pre-sanity state for Dock${s} and table $x"
exit 1
fi
done
done
declare -a ints
declare -a intf
declare -a intall
if [[ $ALLINT == 1 ]];then
echo "Adding loss to $LOSSNO nodes out of $NUMC"
intf=(`shuf -i 1-$NUMC -n $LOSSNO`)
fi
intall=(`seq 1 $NUMC`)
if [[ $ALLINT == 1 ]];then
for int in ${intall[@]};do
echo "Adding delay to Dock${int} out of ${intall[@]}"
dpid=$(docker inspect -f '{{.State.Pid}}' Dock${int})
sudo nsenter -t $dpid -n tc qdisc replace dev $linter root handle 1: prio
if [[ $RSEGMENT == "1" ]];then
DELAY="$(( FIRSTD*${segloss[$(( int-1 ))]} ))ms $RESTD"
else
DELAY="${FIRSTD}ms $RESTD"
fi
if belongs $int ${intf[@]};then
sudo nsenter -t $dpid -n tc qdisc add dev $linter parent 1:2 handle 30: netem delay $DELAY loss $LOSS
else
sudo nsenter -t $dpid -n tc qdisc add dev $linter parent 1:2 handle 30: netem delay $DELAY
fi
done
else
echo "Adding delay $DELAY and loss $LOSS"
dpid=$(docker inspect -f '{{.State.Pid}}' Dock1)
sudo nsenter -t $dpid -n tc qdisc replace dev $linter root handle 1: prio
if [[ $RSEGMENT == "1" ]];then
DELAY="$(( FIRSTD*${segloss[0]} ))ms $RESTD"
else
DELAY="${FIRSTD}ms $RESTD"
fi
sudo nsenter -t $dpid -n tc qdisc add dev $linter parent 1:2 handle 30: netem delay $DELAY loss $LOSS
fi
if [[ $ALLINT == 1 && $EXCL == 1 ]];then
SOCKS=""
for nd in `seq 1 $NUMC`;do
if belongs $nd ${intf[@]};then
echo "Skipping Dock${nd} from SOCKS for loss"
continue
else
if [[ -z $SOCKS ]];then
SOCKS="$SOCKPATH/Dock${nd}.sock"
else
SOCKS+=",$SOCKPATH/Dock${nd}.sock"
fi
fi
done
echo "sysbench on sockets: $SOCKS"
fi
echo "Rules in place"
for s in `seq 1 $NUMC`;do
dpid=$(docker inspect -f '{{.State.Pid}}' Dock${s})
sudo nsenter -t $dpid -n tc qdisc show
done
if [[ ! -e $SDIR/${STEST}.lua ]];then
pushd /tmp
rm $STEST.lua || true
wget -O $STEST.lua http://files.wnohang.net/files/${STEST}.lua
SDIR=/tmp/
popd
fi
set -x
if [[ $ALLINT == 1 ]];then
timeout -k9 $(( SDURATION+200 )) sysbench --test=$SDIR/$STEST.lua --mysql-ignore-errors=1047,1213 --db-driver=mysql --mysql-db=test --mysql-engine-trx=yes --mysql-table-engine=innodb --mysql-socket=$SOCKS --mysql-user=root --num-threads=$TCOUNT --init-rng=on --max-requests=1870000000 --max-time=$SDURATION --oltp_index_updates=20 --oltp_non_index_updates=20 --oltp-auto-inc=$AUTOINC --oltp_distinct_ranges=15 --report-interval=10 --oltp_tables_count=$TCOUNT run 2>&1 | tee $LOGDIR/sysbench_rw_run.txt
else
timeout -k9 $(( SDURATION+200 )) sysbench --test=$SDIR/$STEST.lua --mysql-ignore-errors=1047,1213 --db-driver=mysql --mysql-db=test --mysql-engine-trx=yes --mysql-table-engine=innodb --mysql-socket=$FIRSTSOCK --mysql-user=root --num-threads=$TCOUNT --init-rng=on --max-requests=1870000000 --max-time=$SDURATION --oltp_index_updates=20 --oltp_non_index_updates=20 --oltp-auto-inc=$AUTOINC --oltp_distinct_ranges=15 --report-interval=10 --oltp_tables_count=$TCOUNT run 2>&1 | tee $LOGDIR/sysbench_rw_run.txt
fi
set +x
if [[ $RMOVE == '1' ]];then
if [[ $ALLINT == 1 ]];then
for int in ${intall[@]};do
echo "Removing delay $DELAY and loss $LOSS for container Dock${int}"
dpid=$(docker inspect -f '{{.State.Pid}}' Dock${int})
#sudo nsenter -t $dpid -n tc qdisc del dev $linter root netem || true
sudo nsenter -t $dpid -n tc qdisc change dev $linter parent 1:2 handle 30: netem delay $DELAY || true
done
else
echo "Removing delay $DELAY and loss $LOSS for Dock1"
dpid=$(docker inspect -f '{{.State.Pid}}' Dock1)
#sudo nsenter -t $dpid -n tc qdisc del dev $linter root netem || true
sudo nsenter -t $dpid -n tc qdisc change dev $linter parent 1:2 handle 30: netem delay $DELAY || true
fi
fi
for s in `seq 1 $NUMC`;do
#if [[ $RMOVE == '0' ]] && belongs $s ${intf[@]};then
#echo "Skipping Dock${s} from SOCKS"
#continue
#fi
stat1=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_cluster_status'" 2>/dev/null | tail -1)
stat2=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_state_comment'" 2>/dev/null | tail -1)
stat3=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_recv_queue'" 2>/dev/null | tail -1)
stat4=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_send_queue'" 2>/dev/null | tail -1)
stat5=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_evs_delayed'" 2>/dev/null | tail -1)
if [[ $stat1 != 'Primary' || $stat2 != 'Synced' ]];then
echo "Dock${s} seems to be not stable: $stat1, $stat2, $stat3, $stat4, $stat5"
else
echo "Dock${s} is synced and is primary: $stat1, $stat2, $stat3, $stat4, $stat5"
fi
done
echo "Sleeping for $RSLEEP seconds for reconciliation"
sleep $RSLEEP
echo "Sanity tests"
echo "Statuses"
maxsleep=300
totsleep=0
while true;do
exitfatal=0
whichisstr=""
for s in `seq 1 $NUMC`;do
if [[ $RMOVE == '0' ]] && belongs $s ${intf[@]};then
echo "Skipping Dock${s} from SOCKS"
continue
fi
stat1=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_cluster_status'" 2>/dev/null | tail -1)
stat2=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_state_comment'" 2>/dev/null | tail -1)
stat3=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_recv_queue'" 2>/dev/null | tail -1)
stat4=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_send_queue'" 2>/dev/null | tail -1)
stat5=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_evs_delayed'" 2>/dev/null | tail -1)
if [[ $stat1 != 'Primary' || $stat2 != 'Synced' ]];then
echo "FATAL: Dock${s} seems to be STILL unstable: $stat1, $stat2, $stat3, $stat4, $stat5"
stat=$(mysql -nNE -S $SOCKPATH/Dock${s}.sock -u root -e "show global status like 'wsrep_local_state'" 2>/dev/null | tail -1)
echo "wsrep_local_state of Dock${s} is $stat"
if [[ $stat1 == 'Primary' && ( $stat == '2' || $stat == '1' || $stat == '3' || $stat2 == *Join* || $stat2 == *Don* ) ]];then
exitfatal=3
whichisstr="Dock${s}"
break
else
exitfatal=1
fi
else
echo "Dock${s} is synced and is primary: $stat1, $stat2, $stat3, $stat4, $stat5"
fi
done
if [[ $exitfatal -eq 1 || $totsleep -gt $maxsleep ]];then
exitfatal=1
break
elif [[ $exitfatal -eq 3 ]];then
echo " $whichisstr is still donor/joiner, sleeping 60 seconds"
sleep 60
totsleep=$(( totsleep+60 ))
else
break
fi
echo
echo
done
echo "Sanity queries"
for s in `seq 1 $NUMC`;do
if [[ $RMOVE == '0' ]] && belongs $s ${intf[@]};then
echo "Skipping Dock${s} from SOCKS"
continue
fi
for x in `seq 1 $TCOUNT`;do
echo "For table test.sbtest$x from node Dock${s}" | tee -a $LOGDIR/sanity.log
mysql -S $SOCKPATH/Dock${s}.sock -u root -e "select count(*) from test.sbtest$x" 2>>$LOGDIR/sanity.log || exitfatal=1
done
done
if [[ $exitfatal -eq 1 ]];then
echo "Exit fatal"
if [[ $CATAL == '1' ]];then
echo "Killing with SIGSEGV for core dumps"
pkill -11 -f mysqld || true
sleep 60
fi
exit 1
fi
echo "Sleeping 5s before drop table"
sleep 5
set -x
# Drop the sysbench tables; bounded by timeout like the run phase.
# NOTE(review): fixed "---report-interval" (triple dash) -> "--report-interval".
timeout -k9 $(( SDURATION+200 )) sysbench --test=$LPATH/parallel_prepare.lua --report-interval=10 --oltp-auto-inc=$AUTOINC --mysql-db=test --db-driver=mysql --num-threads=$NUMT --mysql-engine-trx=yes --mysql-table-engine=innodb --mysql-socket=$SOCKS --mysql-user=root --oltp-table-size=$TSIZE --oltp_tables_count=$TCOUNT cleanup 2>&1 | tee $LOGDIR/sysbench_cleanup.txt
set +x
sleep 20
mysql -S $FIRSTSOCK -u root -e "drop database testdb;" || SHUTDN='no'
sleep 10
if [[ $SHUTDN == 'no' ]];then
echo "Exit before cleanup"
exit
fi
mshutdown
|
percona/pxc-docker
|
partition-test/galera-partition.sh
|
Shell
|
gpl-2.0
| 21,525 |
# Build the standalone macOS application bundle with py2app.
# -f so a missing setup.py from a clean checkout is not an error.
rm -f -- setup.py
# Generate a fresh py2app setup.py for the main script.
py2applet --make-setup "SQLite Browser_CN.py"
# Build the .app bundle with the project icon.
python setup.py py2app --iconfile logo.icns
|
XD-DENG/SQLite-Browser
|
scripts/GUI/CN/Build_Mac_Application_CN.sh
|
Shell
|
gpl-2.0
| 101 |
#!/bin/bash
#set -x
# Submit one SGE job per subject; each job runs the anatomy preprocessing
# and nuisance-mask scripts for the rsfcGraphAnalysis task.
trap exit SIGHUP SIGINT SIGTERM
taskDir=rsfcGraphAnalysis
task=ESTOP
## task=funcon
RSFC_ROOT=/data/sanDiego
# Subjects come from the command line when given, otherwise from the
# hard-coded follow-up list below.
if [[ $# -gt 0 ]] ; then
    subjects="$*"
else
    # subjects="$( cat ../data/config/control.subjectList.txt ../data/config/mdd.nat.txt )"
    # subjects="106_C 112_C 118_C 144_C 149_C 158_C 161_C 300_C 304_C
    # 311_C 315_C 317_C 322_C 330_C 337_C 341_C 357_C 365_C
    # 367_C 389_C 397_C 403_C 410_C 415_C 111_C 117_C 120_C
    # 147_C 150_C 160_C 167_C 301_C 309_C 313_C 316_C 320_C
    # 323_C 336_C 339_C 348_C 364_C 366_C 380_C 392_C 401_C
    # 406_C 414_C 419_C"
    subjects="386_D 387_D 388_D 389_D 392_D 393_D 394_D 394_D2 397_D
    401_D 402_D 403_D 404_D 405_D 406_D 413_D 415_D 420_D 421_C 422_C
    423_C 423_D 424_C 426_A"
    ## change task above too to match this list of subjects
    ##subjects=$( cd $RSFC_ROOT/$taskDir/data; \ls -1d *_[ABC] )
    ## subjects="$( cat /data/sanDiego/cESTOP/data/config/clean.estop.subjList.txt /data/sanDiego/cESTOP/data/config/clean.estop.mddfollowup.txt )"
fi
date
if [[ ! -d ../log ]] ; then
    mkdir ../log
fi
# Write a per-subject SGE job script (the "#$" lines are qsub directives),
# then submit it. The heredoc is unquoted, so $RSFC_ROOT/$taskDir/$subject
# expand now while \$CPUS-style escapes expand when the job runs.
for subject in $subjects ; do
## if [[ -f $RSFC_ROOT/$taskDir/data/$subject/${subject}$task+orig.HEAD ]] ; then
    cat <<EOF > run/run-followup-$subject.sh
#!/bin/bash
## jobname
#$ -N followup-$subject
## queue
#$ -q all.q
## binary?
#$ -b y
## rerunnable?
#$ -r y
## merge stdout and stderr?
#$ -j y
## send no mail
#$ -m n
## execute from the current working directory
#$ -cwd
## use a shell to run the command
#$ -shell yes
## set the shell
#$ -S /bin/bash
## preserve environment
#$ -V
AFNI_COMPRESSOR=GZIP
AFNI_DECONFLICT=OVERWRITE
CPUS=1
JOBS="-jobs \$CPUS"
OMP_NUM_THREADS=\$CPUS
export JOBS OMP_NUM_THREADS AFNI_COMPRESSOR AFNI_DECONFLICT
cd $RSFC_ROOT/$taskDir/scripts
./01-preprocessAnatomy.sh --subject=$subject
./02-makeNuisanceMasks.sh -s ${subject}
## ./02-preprocessFunctional.sh --subject=$subject --drop=0 --fwhm=4.2 -c 0.3
## ./04-extractRsfcTimeseriesFromAalMasks.sh --subject=$subject -l ../standard/config/Harvard-Oxford_amygdala_seeds.txt
## ./04-extractEstopTimeseriesFromAalMasks.sh --subject=$subject --seedlist ../standard/aal_for_SPM8/fcseedlist3mm.txt
EOF
    chmod +x run/run-followup-$subject.sh
    echo $subject
    qsub -o ../log/followup-$subject.log \
         run/run-followup-$subject.sh
# else
# echo "*** No such file: $RSFC_ROOT/$taskDir/data/$subject/${subject}$task+orig.HEAD"
# fi
done
|
colmconn/rsfcGraphAnalysis
|
runEachSubjectParallel.sh
|
Shell
|
gpl-2.0
| 2,535 |
#!/bin/bash
# Openstack icehouse installation script on ubuntu 14.04
# by kasidit chanchio
# vasabilab, dept of computer science,
# Thammasat University, Thailand
#
# Copyright 2014 Kasidit Chanchio
#
# this script or commands below should be called
# prior to openstack installation
#
# NOTE(review): the shebang was below the header comments, where it is
# ignored; moved to line 1. The cd is now guarded so the /etc file copies
# below cannot run from the wrong directory.
#
cd "$HOME/OPSInstaller/network" || exit 1
pwd
cp files/hosts /etc/hosts
# Keep a backup of the original apt sources before replacing them.
cp /etc/apt/sources.list /etc/apt/sources.list.saved
cp files/local-sources.list /etc/apt/sources.list
apt-get update
#printf "set repo and update\n"
apt-get -y install software-properties-common
add-apt-repository cloud-archive:mitaka
#
# assume the controller is up to date.
#
apt-get update
apt-get -y dist-upgrade
apt-get -y install python-openstackclient
# reboot (if needed)
reboot
|
kasidit/openstack-mitaka-installer
|
documents/OPSInstaller.example/network/exe-stage00-SUDO-update.sh
|
Shell
|
gpl-2.0
| 744 |
# Copyright (c) 2015 Igor Pecovnik, igor.pecovnik@gma**.com
#
# This file is licensed under the terms of the GNU General Public
# License version 2. This program is licensed "as is" without any
# warranty of any kind, whether express or implied.
# This file is a part of the Armbian build script
# https://github.com/armbian/build/
# Main program
#
# Refuse direct execution -- compile.sh is the supported entry point that
# sets up $SRC and the environment this script depends on.
if [[ $(basename $0) == main.sh ]]; then
	echo "Please use compile.sh to start the build process"
	# NOTE(review): was "exit -1"; bash reduces that to 255, so use the
	# explicit, portable value.
	exit 255
fi
# default umask for root is 022 so parent directories won't be group writeable without this
# this is used instead of making the chmod in prepare_host() recursive
umask 002
# destination
DEST=$SRC/output
TTY_X=$(($(stty size | awk '{print $2}')-6)) # determine terminal width
TTY_Y=$(($(stty size | awk '{print $1}')-6)) # determine terminal height
# We'll use this title on all menus
backtitle="Armbian building script, http://www.armbian.com | Author: Igor Pecovnik"
# if language not set, set to english
[[ -z $LANGUAGE ]] && export LANGUAGE="en_US:en"
# default console if not set
[[ -z $CONSOLE_CHAR ]] && export CONSOLE_CHAR="UTF-8"
[[ -z $FORCE_CHECKOUT ]] && FORCE_CHECKOUT=yes
# Load libraries
source $SRC/lib/debootstrap-ng.sh # System specific install
source $SRC/lib/image-helpers.sh # helpers for OS image building
source $SRC/lib/distributions.sh # System specific install
source $SRC/lib/desktop.sh # Desktop specific install
source $SRC/lib/compilation.sh # Patching and compilation of kernel, uboot, ATF
source $SRC/lib/makeboarddeb.sh # Create board support package
source $SRC/lib/general.sh # General functions
source $SRC/lib/chroot-buildpackages.sh # Building packages in chroot
# compress and remove old logs
mkdir -p $DEST/debug
(cd $DEST/debug && tar -czf logs-$(<timestamp).tgz *.log) > /dev/null 2>&1
rm -f $DEST/debug/*.log > /dev/null 2>&1
date +"%d_%m_%Y-%H_%M_%S" > $DEST/debug/timestamp
# delete compressed logs older than 7 days
(cd $DEST/debug && find . -name '*.tgz' -mtime +7 -delete) > /dev/null
if [[ $PROGRESS_DISPLAY == none ]]; then
OUTPUT_VERYSILENT=yes
elif [[ $PROGRESS_DISPLAY == dialog ]]; then
OUTPUT_DIALOG=yes
fi
if [[ $PROGRESS_LOG_TO_FILE != yes ]]; then unset PROGRESS_LOG_TO_FILE; fi
SHOW_WARNING=yes
if [[ $USE_CCACHE != no ]]; then
CCACHE=ccache
export PATH="/usr/lib/ccache:$PATH"
# private ccache directory to avoid permission issues when using build script with "sudo"
# see https://ccache.samba.org/manual.html#_sharing_a_cache for alternative solution
[[ $PRIVATE_CCACHE == yes ]] && export CCACHE_DIR=$SRC/cache/ccache
else
CCACHE=""
fi
# Check and install dependencies, directory structure and settings
prepare_host
# if KERNEL_ONLY, KERNEL_CONFIGURE, BOARD, BRANCH or RELEASE are not set, display selection menu
if [[ -z $KERNEL_ONLY ]]; then
options+=("yes" "U-boot and kernel packages")
options+=("no" "Full OS image for flashing")
KERNEL_ONLY=$(dialog --stdout --title "Choose an option" --backtitle "$backtitle" --no-tags --menu "Select what to build" \
$TTY_Y $TTY_X $(($TTY_Y - 8)) "${options[@]}")
unset options
[[ -z $KERNEL_ONLY ]] && exit_with_error "No option selected"
fi
if [[ -z $KERNEL_CONFIGURE ]]; then
options+=("no" "Do not change the kernel configuration")
options+=("yes" "Show a kernel configuration menu before compilation")
KERNEL_CONFIGURE=$(dialog --stdout --title "Choose an option" --backtitle "$backtitle" --no-tags --menu "Select the kernel configuration" \
$TTY_Y $TTY_X $(($TTY_Y - 8)) "${options[@]}")
unset options
[[ -z $KERNEL_CONFIGURE ]] && exit_with_error "No option selected"
fi
if [[ -z $BOARD ]]; then
WIP_STATE=supported
WIP_BUTTON='CSC/WIP/EOS'
STATE_DESCRIPTION=' - Officially supported boards'
temp_rc=$(mktemp)
while true; do
options=()
if [[ $WIP_STATE == supported ]]; then
for board in $SRC/config/boards/*.conf; do
options+=("$(basename $board | cut -d'.' -f1)" "$(head -1 $board | cut -d'#' -f2)")
done
else
for board in $SRC/config/boards/*.wip; do
options+=("$(basename $board | cut -d'.' -f1)" "\Z1(WIP)\Zn $(head -1 $board | cut -d'#' -f2)")
done
for board in $SRC/config/boards/*.csc; do
options+=("$(basename $board | cut -d'.' -f1)" "\Z1(CSC)\Zn $(head -1 $board | cut -d'#' -f2)")
done
for board in $SRC/config/boards/*.eos; do
options+=("$(basename $board | cut -d'.' -f1)" "\Z1(EOS)\Zn $(head -1 $board | cut -d'#' -f2)")
done
fi
if [[ $WIP_STATE != supported ]]; then
cat <<-'EOF' > $temp_rc
dialog_color = (RED,WHITE,OFF)
screen_color = (WHITE,RED,ON)
tag_color = (RED,WHITE,ON)
item_selected_color = (WHITE,RED,ON)
tag_selected_color = (WHITE,RED,ON)
tag_key_selected_color = (WHITE,RED,ON)
EOF
else
echo > $temp_rc
fi
BOARD=$(DIALOGRC=$temp_rc dialog --stdout --title "Choose a board" --backtitle "$backtitle" --scrollbar --colors \
--extra-label "Show $WIP_BUTTON" --extra-button --menu "Select the target board. Displaying:\n$STATE_DESCRIPTION" \
$TTY_Y $TTY_X $(($TTY_Y - 8)) "${options[@]}")
STATUS=$?
if [[ $STATUS == 3 ]]; then
if [[ $WIP_STATE == supported ]]; then
[[ $SHOW_WARNING == yes ]] && show_developer_warning
STATE_DESCRIPTION=' - \Z1(CSC)\Zn - Community Supported Configuration\n - \Z1(WIP)\Zn - Work In Progress\n - \Z1(EOS)\Zn - End Of Support'
WIP_STATE=unsupported
WIP_BUTTON='supported'
else
STATE_DESCRIPTION=' - Officially supported boards'
WIP_STATE=supported
WIP_BUTTON='CSC/WIP/EOS'
fi
continue
elif [[ $STATUS == 0 ]]; then
break
fi
unset options
[[ -z $BOARD ]] && exit_with_error "No board selected"
done
fi
if [[ -f $SRC/config/boards/${BOARD}.conf ]]; then
BOARD_TYPE='conf'
elif [[ -f $SRC/config/boards/${BOARD}.csc ]]; then
BOARD_TYPE='csc'
elif [[ -f $SRC/config/boards/${BOARD}.wip ]]; then
BOARD_TYPE='wip'
elif [[ -f $SRC/config/boards/${BOARD}.eos ]]; then
BOARD_TYPE='eos'
fi
source $SRC/config/boards/${BOARD}.${BOARD_TYPE}
LINUXFAMILY="${BOARDFAMILY}"
[[ -z $KERNEL_TARGET ]] && exit_with_error "Board configuration does not define valid kernel config"
if [[ -z $BRANCH ]]; then
options=()
[[ $KERNEL_TARGET == *default* ]] && options+=("default" "Vendor provided / legacy (3.4.x - 4.4.x)")
[[ $KERNEL_TARGET == *next* ]] && options+=("next" "Mainline (@kernel.org) (4.x)")
[[ $KERNEL_TARGET == *dev* && $EXPERT = yes ]] && options+=("dev" "\Z1Development version (4.x)\Zn")
# do not display selection dialog if only one kernel branch is available
if [[ "${#options[@]}" == 2 ]]; then
BRANCH="${options[0]}"
else
BRANCH=$(dialog --stdout --title "Choose a kernel" --backtitle "$backtitle" --colors \
--menu "Select the target kernel branch\nExact kernel versions depend on selected board" \
$TTY_Y $TTY_X $(($TTY_Y - 8)) "${options[@]}")
fi
unset options
[[ -z $BRANCH ]] && exit_with_error "No kernel branch selected"
[[ $BRANCH == dev && $SHOW_WARNING == yes ]] && show_developer_warning
else
[[ $KERNEL_TARGET != *$BRANCH* ]] && exit_with_error "Kernel branch not defined for this board" "$BRANCH"
fi
if [[ $KERNEL_ONLY != yes && -z $RELEASE ]]; then
options=()
options+=("jessie" "Debian 8 Jessie")
options+=("stretch" "Debian 9 Stretch")
options+=("xenial" "Ubuntu Xenial 16.04 LTS")
[[ $EXPERT = yes ]] && options+=("bionic" "Ubuntu Bionic 18.04 LTS")
RELEASE=$(dialog --stdout --title "Choose a release" --backtitle "$backtitle" --menu "Select the target OS release" \
$TTY_Y $TTY_X $(($TTY_Y - 8)) "${options[@]}")
unset options
[[ -z $RELEASE ]] && exit_with_error "No release selected"
fi
if [[ $KERNEL_ONLY != yes && -z $BUILD_DESKTOP ]]; then
options=()
options+=("no" "Image with console interface (server)")
options+=("yes" "Image with desktop environment")
BUILD_DESKTOP=$(dialog --stdout --title "Choose image type" --backtitle "$backtitle" --no-tags --menu "Select the target image type" \
$TTY_Y $TTY_X $(($TTY_Y - 8)) "${options[@]}")
unset options
[[ -z $BUILD_DESKTOP ]] && exit_with_error "No option selected"
fi
source $SRC/lib/configuration.sh
# optimize build time with 100% CPU usage
CPUS=$(grep -c 'processor' /proc/cpuinfo)
if [[ $USEALLCORES != no ]]; then
CTHREADS="-j$(($CPUS + $CPUS/2))"
else
CTHREADS="-j1"
fi
start=`date +%s`
[[ $CLEAN_LEVEL == *sources* ]] && cleaning "sources"
# ignore updates help on building all images - for internal purposes
# fetch_from_repo <url> <dir> <ref> <subdir_flag>
if [[ $IGNORE_UPDATES != yes ]]; then
display_alert "Downloading sources" "" "info"
fetch_from_repo "$BOOTSOURCE" "$BOOTDIR" "$BOOTBRANCH" "yes"
fetch_from_repo "$KERNELSOURCE" "$KERNELDIR" "$KERNELBRANCH" "yes"
if [[ -n $ATFSOURCE ]]; then
fetch_from_repo "$ATFSOURCE" "$ATFDIR" "$ATFBRANCH" "yes"
fi
fetch_from_repo "https://github.com/linux-sunxi/sunxi-tools" "sunxi-tools" "branch:master"
fetch_from_repo "https://github.com/rockchip-linux/rkbin" "rkbin-tools" "branch:master"
fetch_from_repo "https://github.com/MarvellEmbeddedProcessors/A3700-utils-marvell" "marvell-tools" "branch:A3700_utils-armada-17.10"
fetch_from_repo "https://github.com/armbian/odroidc2-blobs" "odroidc2-blobs" "branch:master"
fi
if [[ $BETA == yes ]]; then
IMAGE_TYPE=nightly
elif [[ $BETA != "yes" && $BUILD_ALL == yes && -n $GPG_PASS ]]; then
IMAGE_TYPE=stable
else
IMAGE_TYPE=user-built
fi
compile_sunxi_tools
install_rkbin_tools
BOOTSOURCEDIR=$BOOTDIR/${BOOTBRANCH##*:}
LINUXSOURCEDIR=$KERNELDIR/${KERNELBRANCH##*:}
[[ -n $ATFSOURCE ]] && ATFSOURCEDIR=$ATFDIR/${ATFBRANCH##*:}
# define package names
DEB_BRANCH=${BRANCH//default}
# if not empty, append hyphen
DEB_BRANCH=${DEB_BRANCH:+${DEB_BRANCH}-}
CHOSEN_UBOOT=linux-u-boot-${DEB_BRANCH}${BOARD}
CHOSEN_KERNEL=linux-image-${DEB_BRANCH}${LINUXFAMILY}
CHOSEN_ROOTFS=linux-${RELEASE}-root-${DEB_BRANCH}${BOARD}
CHOSEN_DESKTOP=armbian-${RELEASE}-desktop
CHOSEN_KSRC=linux-source-${BRANCH}-${LINUXFAMILY}
for option in $(tr ',' ' ' <<< "$CLEAN_LEVEL"); do
[[ $option != sources ]] && cleaning "$option"
done
# Compile u-boot if packed .deb does not exist
if [[ ! -f $DEST/debs/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb ]]; then
if [[ -n $ATFSOURCE ]]; then
compile_atf
fi
compile_uboot
fi
# Compile kernel if packed .deb does not exist
if [[ ! -f $DEST/debs/${CHOSEN_KERNEL}_${REVISION}_${ARCH}.deb ]]; then
compile_kernel
fi
overlayfs_wrapper "cleanup"
# extract kernel version from .deb package
VER=$(dpkg --info $DEST/debs/${CHOSEN_KERNEL}_${REVISION}_${ARCH}.deb | grep Descr | awk '{print $(NF)}')
VER="${VER/-$LINUXFAMILY/}"
# create board support package
[[ -n $RELEASE && ! -f $DEST/debs/$RELEASE/${CHOSEN_ROOTFS}_${REVISION}_${ARCH}.deb ]] && create_board_package
# create desktop package
[[ -n $RELEASE && ! -f $DEST/debs/$RELEASE/${CHOSEN_DESKTOP}_${REVISION}_all.deb ]] && create_desktop_package
# build additional packages
[[ $EXTERNAL_NEW == compile ]] && chroot_build_packages
if [[ $KERNEL_ONLY != yes ]]; then
debootstrap_ng
else
display_alert "Kernel build done" "@host" "info"
display_alert "Target directory" "$DEST/debs/" "info"
display_alert "File name" "${CHOSEN_KERNEL}_${REVISION}_${ARCH}.deb" "info"
fi
# hook for function to run after build, i.e. to change owner of $SRC
# NOTE: this will run only if there were no errors during build process
[[ $(type -t run_after_build) == function ]] && run_after_build || true
# Report total wall-clock build time in whole minutes ($start is captured
# before the build begins). $() used instead of legacy backticks.
end=$(date +%s)
runtime=$(((end-start)/60))
display_alert "Runtime" "$runtime min" "info"
|
150balbes/lib
|
lib/main.sh
|
Shell
|
gpl-2.0
| 11,464 |
#!/bin/sh
# Launcher script: an infinite loop that restarts the server whenever it
# exits, appending its standard output to a log file.
# The first pass writes to ./logs/syslog.log; before that, the previous
# log (if any) is preserved as syslog.log.OLD.
# The server is restarted after a 10 second pause.
# The pid of this launcher is saved to ./logs/run.pid.
I=1
while [ $I -gt 0 ]; do
    if [ $I -eq 1 ]
    then
        if [ -f ./logs/syslog.log ]; then
            # Preserve the previous log as .OLD.
            cp ./logs/syslog.log ./logs/syslog.log.OLD
        fi
        # Truncate the old log.
        echo > ./logs/syslog.log
    fi
    # Timestamp each (re)start in the log.
    echo `date` >> ./logs/syslog.log
    # Start the server (blocks until it exits/crashes).
    echo $$ > ./logs/run.pid
    node ./node_modules/coffee-script/bin/coffee ./lib/main.coffee >> ./logs/syslog.log
    # 10 second pause before restarting.
    sleep 10
    I=$((I+1))
done
|
mborisv/SigmaLine
|
run.sh
|
Shell
|
gpl-2.0
| 1,013 |
# ----------------------------------------------------------------------------
# Gera um nome aleatório de N caracteres, alternando consoantes e vogais.
# Obs.: Se nenhum parâmetro for passado, gera um nome de 6 caracteres.
# Uso: zznomealeatorio [N]
# Ex.: zznomealeatorio
# zznomealeatorio 8
#
# Autor: Guilherme Magalhães Gall <gmgall (a) gmail com>
# Desde: 2013-03-03
# Versão: 2
# Licença: GPL
# Requisitos: zzseq zzaleatorio
# Tags: sugestão
# ----------------------------------------------------------------------------
zznomealeatorio ()
{
# Print help and return when the help flag is given (zzzz framework helper).
zzzz -h nomealeatorio "$1" && return
local vogais='aeiou'
local consoantes='bcdfghjlmnpqrstvxz'
# Without an argument, generate a 6-character name.
local entrada=${1:-6}
local contador
local letra
local nome
local posicao
local lista
# If the argument count is wrong or the value is not a non-negative
# integer, show the usage message and fail.
(test $# -gt 1 || ! zztool testa_numero "$entrada") && {
zztool -e uso nomealeatorio
return 1
}
# A 0-character name: nothing to do.
test "$entrada" -eq 0 && return
# Build a random name of $entrada characters, alternating consonants and
# vowels (odd positions get consonants). Algorithm based on randomName()
# from http://geradordenomes.com
for contador in $(zzseq "$entrada")
do
if test $((contador%2)) -eq 1
then
lista="$consoantes"
else
lista="$vogais"
fi
# pick a random 1-based position in the current list and take that letter
posicao=$(zzaleatorio 1 ${#lista})
letra=$(echo "$lista" | cut -c "$posicao")
nome="$nome$letra"
done
echo "$nome"
}
|
faustovaz/funcoeszz
|
zz/zznomealeatorio.sh
|
Shell
|
gpl-2.0
| 1,553 |
#!/bin/bash
# Send an email to every user stored in the SQLite database given as $1.
# sqlite3 prints rows pipe-separated by default; the original code split
# each row with three echo|awk pipelines per user (and unquoted $ROW,
# which mangles whitespace and globs). Letting read split on '|' does the
# same job with no extra processes.
# Column order follows the original a[1]/a[2]/a[3] mapping: id, name, email.
sqlite3 "$1" "SELECT * FROM users" | while IFS='|' read -r id name email; do
  echo "$name"
  echo "$email"
  ./sendemail.sh "$email" "$name"
done
|
rfoecking/catpics
|
scripts/sendcat.sh
|
Shell
|
gpl-2.0
| 315 |
#!/bin/sh
# This script is the current "state of the art" of compiling GNUnet for the
# Sharp Zaurus SL5000. It does not work yet. You need to download the cross
# compiler rpms from the Sharp website to get it to "compile" -- note that
# it does not link...
# This is where the rpm installs the compiler, must be in the path!
export PATH=/opt/Embedix/tools/bin/:$PATH
# you may want to edit "configure" to force "linux" for host_os to be accepted
# for generating shared libraries (the generated configure expects "linux-gnu" which
# for some reason is not what it detects for the cross compilation.
#
# If you do that, the build fails when executables are linked with "malloc_rpl" not
# found/resolved. Beats me.
# Cross-configure for ARM with crypto from the Embedix tree, directory-based
# storage, and no GTK frontend.
./configure --host=arm-linux --with-crypto=/opt/Embedix/ --with-storage=directory --without-gtk
|
pruby/GNUnet-Mirror
|
bin/arm.sh
|
Shell
|
gpl-2.0
| 815 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2012-2018 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# If we get an event that a file was modified, but the file was removed or
# renamed before we can process the event, the monitor should just
# delete the node from the db instead of failing an fstatat() call.
# load the tup test harness helpers (monitor, tup, eotup, ...)
. ./tup.sh
check_monitor_supported
monitor
# create enough files that the event queue is still busy when we race below
num=100
for i in `seq 1 $num`; do
touch foo$i
echo "hey" > foo$i
done
# Start processing all the create/modify events we made above, while simultaneously removing
# all the files. Some of these will be gone when we process the modify events, but we
# won't have gotten the delete events that would remove the modify events from the queue.
tup flush & rm foo*
stop_monitor
eotup
|
jonatanolofsson/tup
|
test/t7047-mod-deleted-file.sh
|
Shell
|
gpl-2.0
| 1,387 |
#!/bin/bash
# Sergio Cayuqueo <[email protected]>
# http://cayu.com.ar
# Nagios-style check of the Oracle instance status (queried from v$instance).
# Exit codes follow the Nagios plugin convention: 0 = OK, 2 = CRITICAL.
# FIX: removed a stray "start" command in the MOUNTED branch (it would fail
# with "command not found" before the CRITICAL message was printed).
# FIX: the OPEN branch exited 1, which Nagios reads as WARNING; OK must be 0.
DATABASE_STATUS=`echo -e "set head off\nset pagesize 0\nSELECT status, database_status FROM v\\$instance;" | sqlplus -S "/ as sysdba"| cut -f1`
case "$DATABASE_STATUS" in
	MOUNTED)
		# Instance mounted but not open: tablespaces unavailable.
		echo "CRITICAL - Los tablespaces de la base de datos estan $DATABASE_STATUS -" `date '+DATE: %m/%d/%y TIME:%H:%M:%S'`
		exit 2
		;;
	OPEN)
		echo "OK - Los tablespaces de la base de datos estan $DATABASE_STATUS -" `date '+DATE: %m/%d/%y TIME:%H:%M:%S'`
		exit 0
		;;
	*)
		# Any other (or empty) status means sqlplus failed or the DB is down.
		echo "CRITICAL - Hay algun error con la base de datos $DATABASE_STATUS -" `date '+DATE: %m/%d/%y TIME:%H:%M:%S'`
		exit 2
		;;
esac
|
cayu/nagios-scripts
|
check_oracle_database_status.sh
|
Shell
|
gpl-2.0
| 830 |
#!/bin/bash
# Cross-compile the kernel for the "mp92_1021" board with the prebuilt
# arm-eabi-4.6 toolchain; the actual build is driven by ./mk.sh.
export PATH=~/kernel_build/arm-eabi-4.6/bin/:$PATH
export ARCH=arm
export SUBARCH=arm
export CROSS_COMPILE=~/kernel_build/arm-eabi-4.6/bin/arm-eabi-
# '+' is appended to the kernel version string (localversion suffix)
export LOCALVERSION=+
#export INSTALL_MOD_PATH=~/kernel_build/rk3188/mod_fw
#board name
./mk.sh "mp92_1021"
|
andr7e/mediacomMP82S4
|
kernel/build_yf_mp92_1021.sh
|
Shell
|
gpl-2.0
| 273 |
#!/bin/bash
#
# Copyright (c) 2015 Igor Pecovnik, igor.pecovnik@gma**.com
#
# This file is licensed under the terms of the GNU General Public
# License version 2. This program is licensed "as is" without any
# warranty of any kind, whether express or implied.
#
# This file is a part of tool chain https://github.com/igorpecovnik/lib
#
#
# Source patching functions
#
# Functions:
# advanced_patch
# process_patch_file
# patching_sources
# advanced_patch <dest> <family> <device> <description>
#
# parameters:
# <dest>: u-boot, kernel
# <family>: u-boot: u-boot, u-boot-neo; kernel: sun4i-default, sunxi-next, ...
# <device>: cubieboard, cubieboard2, cubietruck, ...
# <description>: additional description text
#
# priority:
# $SRC/userpatches/<dest>/<family>/<device>
# $SRC/userpatches/<dest>/<family>
# $SRC/lib/patch/<dest>/<family>/<device>
# $SRC/lib/patch/<dest>/<family>
#
advanced_patch () {
	local dest=$1
	local family=$2
	local device=$3
	local description=$4

	display_alert "Started patching process for" "$dest $description" "info"
	display_alert "Looking for user patches in" "userpatches/$dest/$family" "info"

	local names=()
	# Search directories in priority order: user patches override bundled
	# ones, and device-specific directories override the family directory.
	local dirs=("$SRC/userpatches/$dest/$family/$device" "$SRC/userpatches/$dest/$family" "$SRC/lib/patch/$dest/$family/$device" "$SRC/lib/patch/$dest/$family")

	# required for "for" command
	shopt -s nullglob dotglob
	# get patch file names
	for dir in "${dirs[@]}"; do
		for patch in $dir/*.patch; do
			names+=($(basename $patch))
		done
	done
	# remove duplicates
	local names_s=($(echo "${names[@]}" | tr ' ' '\n' | LC_ALL=C sort -u | tr '\n' ' '))
	# apply patches: the first directory that contains a given name wins;
	# an empty (zero-size) patch file deliberately disables that patch.
	for name in "${names_s[@]}"; do
		for dir in "${dirs[@]}"; do
			if [ -f "$dir/$name" ] || [ -L "$dir/$name" ]; then
				if [ -s "$dir/$name" ]; then
					process_patch_file "$dir/$name" "$description"
				else
					display_alert "... $name" "skipped" "info"
				fi
				break # next name
			fi
		done
	done
}
# process_patch_file <file> <description>
#
# parameters:
# <file>: path to patch file
# <description>: additional description text
#
process_patch_file() {
	local patch=$1
	local description=$2

	# detect and remove files which patch will create, so that re-running
	# the patch (-N) does not fail on already-existing files
	LANGUAGE=english patch --batch --dry-run -p1 -N < $patch | grep create \
		| awk '{print $NF}' | sed -n 's/,//p' | xargs -I % sh -c 'rm %'

	# main patch command; output goes to the install log only
	echo "$patch $description" >> $DEST/debug/install.log
	patch --batch --silent -p1 -N < $patch >> $DEST/debug/install.log 2>&1
	if [ $? -ne 0 ]; then
		display_alert "... $(basename $patch)" "failed" "wrn";
		# optionally abort the whole build on the first failing patch
		if [[ $EXIT_PATCHING_ERROR == "yes" ]]; then exit_with_error "Aborting due to" "EXIT_PATCHING_ERROR"; fi
	else
		display_alert "... $(basename $patch)" "succeeded" "info"
	fi
}
patching_sources(){
#--------------------------------------------------------------------------------------------------------------------------------
# Patching kernel
#--------------------------------------------------------------------------------------------------------------------------------
	cd $SOURCES/$LINUXSOURCEDIR

	# what are we building
	#grab_kernel_version

	# this is a patch that Ubuntu Trusty compiler works
	# (apply only if not already applied: a dry run reporting "Reversed"
	# means the patch is present)
	if [ "$(patch --dry-run -t -p1 < $SRC/lib/patch/kernel/compiler.patch | grep Reversed)" != "" ]; then
		patch --batch --silent -t -p1 < $SRC/lib/patch/kernel/compiler.patch > /dev/null 2>&1
	fi

	# this exception is needed if we switch to legacy sunxi sources in configuration.sh to https://github.com/dan-and/linux-sunxi
	if [[ $LINUXKERNEL == *dan-and* && ($BOARD == bana* || $BOARD == orangepi* || $BOARD == lamobo*) ]]; then
		LINUXFAMILY="banana";
	fi

	# this exception is needed since AW boards share single mainline kernel
	[[ $LINUXFAMILY == sun*i && $BRANCH != "default" ]] && LINUXFAMILY="sunxi"

	# it can be changed in this process; re-read $VER from the sources
	grab_version "$SOURCES/$LINUXSOURCEDIR"

	advanced_patch "kernel" "$LINUXFAMILY-$BRANCH" "$BOARD" "$LINUXFAMILY-$BRANCH $VER"
#---------------------------------------------------------------------------------------------------------------------------------
# Patching u-boot
#---------------------------------------------------------------------------------------------------------------------------------
	cd $SOURCES/$BOOTSOURCEDIR
	grab_version "$SOURCES/$BOOTSOURCEDIR"
	advanced_patch "u-boot" "$BOOTSOURCE-$BRANCH" "$BOARD" "$BOOTSOURCE-$BRANCH $VER"
}
|
nfedera/igorpecovnik-lib
|
patching.sh
|
Shell
|
gpl-2.0
| 4,376 |
#! /usr/bin/env bash
# Build the KsMobil web app with espresso.
# Usage: build.sh [--no-cache] [--dev]
#   --no-cache  strip the HTML5 appcache manifest reference from index.html
#   --dev       symlink build/current to the fresh build instead of packing
# The build version is read from the "buildVersion" entry in config.json.
# FIX: quote "$(dirname "$0")" (was unquoted backticks) so the script works
# when its path contains spaces, and abort if the cd fails.
cd "$(dirname "$0")" || exit 1
out=$(grep buildVersion config.json | cut -d ":" -f 2 | cut -d '"' -f 2)
../node_modules/espresso/bin/espresso.js build
if [[ $* =~ --no-cache ]]
then
  sed -i 's/<html manifest="cache.manifest">/<html>/' "build/$out/index.html"
fi
if [[ $* =~ --dev ]]
then
  ln -snf "$out" build/current
  echo "Build version $out to build/current"
else
  tar czf "build/KsMobil-v$out.tar.gz" -C "build/$out/" .
  echo "Build version $out to build/KsMobil-v$out.tar.gz"
fi
cd -
|
bfpi/klarschiff-mobil
|
KsMobil/build.sh
|
Shell
|
gpl-2.0
| 493 |
# Slice the scanned dictionary pages OCS-565-A/B into per-entry strips with
# ImageMagick (crop geometry is WIDTHxHEIGHT+X+Y, +repage resets the canvas).
# A strip that continues across a page boundary is vertically appended
# (convert -append) onto the last strip of the previous page, then the
# temporary strip is removed. Looks machine-generated — edit with care.
convert images/OCS-565-A.png -crop 1553x453+0+0 +repage images/OCS-565-A-0.png
convert -append images/OCS-564-B-11.png images/OCS-565-A-0.png images/OCS-564-B-11.png
rm images/OCS-565-A-0.png
convert images/OCS-565-A.png -crop 1553x395+0+468 +repage images/OCS-565-A-1.png
convert images/OCS-565-A.png -crop 1553x309+0+866 +repage images/OCS-565-A-2.png
convert images/OCS-565-A.png -crop 1553x453+0+1190 +repage images/OCS-565-A-3.png
convert images/OCS-565-A.png -crop 1553x711+0+1658 +repage images/OCS-565-A-4.png
convert images/OCS-565-A.png -crop 1553x299+0+2384 +repage images/OCS-565-A-5.png
convert images/OCS-565-A.png -crop 1553x1903+0+2696 +repage images/OCS-565-A-6.png
#
#/OCS-565.png
convert images/OCS-565-B.png -crop 1555x869+0+0 +repage images/OCS-565-B-0.png
convert -append images/OCS-565-A-6.png images/OCS-565-B-0.png images/OCS-565-A-6.png
rm images/OCS-565-B-0.png
convert images/OCS-565-B.png -crop 1555x391+0+872 +repage images/OCS-565-B-1.png
convert images/OCS-565-B.png -crop 1555x60+0+1290 +repage images/OCS-565-B-2.png
convert images/OCS-565-B.png -crop 1555x402+0+1341 +repage images/OCS-565-B-3.png
convert images/OCS-565-B.png -crop 1555x1499+0+1754 +repage images/OCS-565-B-4.png
convert images/OCS-565-B.png -crop 1555x781+0+3258 +repage images/OCS-565-B-5.png
convert images/OCS-565-B.png -crop 1555x313+0+4054 +repage images/OCS-565-B-6.png
convert images/OCS-565-B.png -crop 1555x235+0+4374 +repage images/OCS-565-B-7.png
#
#/OCS-565.png
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/findindents.OCS-565.sh
|
Shell
|
gpl-2.0
| 1,478 |
#!/bin/bash
################################################################################
## ##
##copyright 2003, 2016 IBM Corp ##
## ##
## This program is free software; you can redistribute it and or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ##
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ##
## for more details. ##
## ##
## You should have received a copy of the GNU General Public Licens ##
## along with this program; if not, write to the Free Software ##
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ##
## ##
## File : pygtk2.sh ##
## ##
## Description: This testcase tests pygtk2 package ##
## ##
## Author: Anup Kumar, [email protected] ##
################################################################################
# source the utility functions
######cd $(dirname $0)
#LTPBIN=${LTPBIN%/shared}/pygtk2
source $LTPBIN/tc_utils.source
FIVDIR=${LTPBIN%/shared}/pygtk2
REQUIRED="python vncserver"
################################################################################
# Testcase functions
################################################################################
# Per-test setup: verify prerequisites (root, required tools, pygtk2 rpm),
# start a headless VNC server so the GTK tests have a DISPLAY, and create
# the data directory some tests expect.
function tc_local_setup()
{
	tc_root_or_break || exit
	tc_exec_or_break $REQUIRED

	# search the pygtk2 packages
	rpm -q "pygtk2" >$stdout 2>$stderr
	tc_break_if_bad $? "pygtk2 package is not installed"

	# test case were failing to set the display, so provide one via VNC
	vncserver :123456 -SecurityTypes None >$stdout 2>$stderr
	export DISPLAY=$hostname:123456

	# create directory to run the test
	if [ ! -d "/root/.local/share/" ]; then
		mkdir -p /root/.local/share/
	fi
}
# Per-test cleanup: tear down the VNC display created in tc_local_setup.
function tc_local_cleanup()
{
	# Stop the vncserver running at 123456
	vncserver -kill :123456 > /dev/null
}
# Run the pygtk2 upstream test suite and report pass/fail through the
# tc_* framework. The suite prints its verdict (OK/FAILED) on stderr;
# success is judged by grepping for "OK".
function run_test()
{
	tc_info "calling the test from tests directory through runtest utility "
	# start the test with runtest frame work
	pushd $FIVDIR &>/dev/null
	tc_register "running the test utility"
	python tests/runtests.py 1>$stdout 2>$stderr
	grep OK $stderr
	RC=$?
	grep FAILED $stderr
	# if FAILED is absent, the stderr content is only noise — clear it so
	# tc_pass_or_fail does not misreport it as an error
	if [ $? -ne 0 ]; then
		cat /dev/null > $stderr
	fi
	tc_pass_or_fail $RC "runtest failed to execute" || return
	popd &>/dev/null
}
#
# main
#
tc_setup && \
run_test
|
PoornimaNayak/autotest-client-tests
|
linux-tools/pygtk2/pygtk2.sh
|
Shell
|
gpl-2.0
| 3,515 |
#!/bin/bash
# Symlink every dotfile from ~/Sync/dotfiles into $HOME.
cd "$HOME" || exit 1
for i in ~/Sync/dotfiles/.*; do
  # FIX: the ".*" glob also matches the special entries "." and "..",
  # which would create bogus links to the dotfiles directory itself
  # (and its parent); skip them.
  case ${i##*/} in
    .|..) continue ;;
  esac
  ln -sf "$i"
done
# remove the linked .git so $HOME does not look like a git checkout
rm .git
# vim: set ft=sh ts=2 sw=2 et:
|
Boohbah/dotfiles
|
link_dotfiles.sh
|
Shell
|
gpl-2.0
| 117 |
#!/bin/bash
# Questo script serve per creare una copia dei dati sulla macchina locale dentro una cartella specificata dall'utente dentro un server remoto.
# Per il backup è necessario installare rsync e avere conoscenza di SSH e in particolare del file ~/.ssh/config
#
# La struttura del backup è la seguente:
# root-backup
# |
# |--backup-hostname-mese
# | |--01 "backup completo"
# | |--02 "hardlink del backup del giorno preedente e copia solo delle differenze"
# | |--..
# | |--30
# |--backup-hostname-mese-precedente
# | |--01 "backup completo"
# | |--02 "hardlink del backup del giorno preedente e copia solo delle differenze"
# | |--..
# | |--30
# |--cartella-backup-compressi
# | |--archivio-hostname-mese.tar.bz2
# | |--archivio-hostname-mese-precedente.tar.bz2
# | |--..
# | |--archivio-hostname-mese-definito-utente.tar.bz2
#------------------------------------------------------------------------------------------
# Configuration — edit these variables for your setup.
# Directory the script is executed from.
CWD=$(/usr/bin/pwd)
# File listing files/folders to exclude from the backup (see that file for details).
EXCLUDE_FILTER=$CWD/exlude-path.txt
# Name of the machine being backed up.
HN=$(/bin/hostname)
# SSH connection name, defined in ~/.ssh/config.
SSH_NAME=pi
# Directory the backup starts from, e.g. / = root (the whole machine).
# BUG FIX: was LOCAL_FOLDER=$(/), a command substitution that *executes* "/"
# instead of assigning the path.
LOCAL_FOLDER=/
# Remote base directory (full path up to, excluding, the backup folder).
# BUG FIX: was REMOTE_FOLDER=$(/media/1TORRHDD), which executed the path.
REMOTE_FOLDER=/media/1TORRHDD
# Recipient of the summary mail sent after each backup.
[email protected]
# File holding the text that becomes the mail body.
TESTO_MAIL=~/.mail.txt
# Current year.month, e.g. 2015.06.
# BUG FIX: was DATE="date +%Y.%m", which stored the literal command string.
DATE=$(date +%Y.%m)
# Current day of month, e.g. 05 (fifth day).
# BUG FIX: same literal-string problem as DATE.
CURR_DAY=$(date +%d)
# Set to 0 to disable notification mails.
MAIL=1
# For how many DAYS a backup is kept as a plain directory before being
# compressed into an archive (60 days = the 2 months before the current one).
MAX_FOLDER_OLD=60
# For how many DAYS a compressed archive is kept (measured from backup age).
MAX_ARCHIVE_OLD=180
# End of configuration! Everything below is at your own risk.
#------------------------------------------------------------------------------------------
# Name of this month's backup directory, e.g. backup_myhost_2015.06.
# BUG FIX: "backup_$HN_$DATE" referenced the undefined variable $HN_;
# braces are required to delimit the variable name.
BACKUP_FOLDER=backup_${HN}_${DATE}
COMPRESS_FOLDER=compressed-backup
# Round the directory-retention period up to whole months.
# BUG FIX: "[[ $X % 30 != 0 ]]" does string comparison, not arithmetic, and
# "VAR = value" (spaces around =) is not an assignment; use (( )) / $(( )).
if (( MAX_FOLDER_OLD % 30 != 0 )); then
	MAX_FOLDER_OLD_MONTH=$(( MAX_FOLDER_OLD / 30 + 1 ))
else
	MAX_FOLDER_OLD_MONTH=$(( MAX_FOLDER_OLD / 30 ))
fi
# "date +%m --date="$(date +%Y-%m-15) -"
# Send a failure-notification mail from the remote host when the backup fails.
# BUG FIX: "ssh $SSH_NAME ( ... )" is a bash syntax error; the remote commands
# must be passed as a single quoted command string.
# BUG FIX: "df -h > $TESTO_MAIL" overwrote the message just written; append.
# NOTE(review): the local variables ($TESTO_MAIL, $MAIL_DEST, ...) expand
# locally before the command is sent to the remote shell — confirm that the
# local ~/.mail.txt path is also valid on the remote host.
backupFailed() {
	ssh $SSH_NAME "
		echo 'Backup fallito, controllare i permessi di scrittura della cartella remota o lo spazio rimanente sul disco' > $TESTO_MAIL
		df -h >> $TESTO_MAIL
		mail -s 'Backup Fallito' -r [email protected] $MAIL_DEST < $TESTO_MAIL
		rm $TESTO_MAIL
	"
}
# NOTE(review): "$(LOCAL_FOLDER)" / "$(REMOTE/FOLDER)" are command
# substitutions (and a typo), not variable expansions — should be
# "${LOCAL_FOLDER}" / "${REMOTE_FOLDER}".
echo "Copia in corso di $(LOCAL_FOLDER) dentro la cartella remota $(REMOTE/FOLDER)"
# Check whether the remote folder is writable.
# NOTE(review): "[ -d X && -w X ]" is invalid inside single brackets, and the
# test runs on the *remote* side only if quoted as one ssh argument — as
# written this line does not work; verify before relying on it.
if ( ! ssh $SSH_NAME [ -d $REMOTE_FOLDER && -w $REMOTE_FOLDER]); then
echo -e "\e[1;31mError: Cartella remota non esite e/o non scrivibile\e[0m"
if [[ $MAIL == 1 ]]; then
backupFailed
fi
exit 1
fi
# Enter via ssh and create the remote folders.
# NOTE(review): "ssh $SSH_NAME ( ... )" is a bash syntax error — the remote
# commands must be passed as a quoted string or a here-document. The body
# below also mixes local and intended-remote expansion; treat as unfinished.
ssh $SSH_NAME (
# If the folder exists and is writable, proceed.
if [[ -d $REMOTE_FOLDER && -w $REMOTE_FOLDER ]]; then
cd $REMOTE_FOLDER
# Create the backup folder if it does not exist yet.
if [[ ! -d $BACKUP_FOLDER ]]; then
mkdir $BACKUP_FOLDER
# Create the folder that will hold today's backup.
# NOTE(review): this branch runs only when $BACKUP_FOLDER was just created
# and therefore is empty — "ls -A" can never succeed here; logic suspect.
if [[ $(ls -A $BACKUP_FOLDER) ]]; then
mkdir $CURR_DAY
# Folder with the lowest (oldest) day number.
LOW_DAY="ls $BACKUP_FOLDER | sort -V | head -n 1 | sed 's/\///'"
# NOTE(review): LOW_DAY stores the literal command string, it is never
# executed; cp -al therefore links the wrong path.
# To decide: copy from the previous day or from the first day of the month.
cp -al $LOW_DAY $CURR_DAY
fi
HIGH_DAY="ls $BACKUP_FOLDER | sort -V | tail -n 1 | sed 's/\///'"
# TOMORROW_DAY="date -d 'tomorrow' +%d"
fi
if [[ ! -d $COMPRESS_FOLDER ]]; then
mkdir $COMPRESS_FOLDER
# Find and remove archives older than $MAX_ARCHIVE_OLD days.
# NOTE(review): "-exec rm {}" is missing the terminating "\;".
find . -name archive_* -type f -mtime +$MAX_ARCHIVE_OLD -exec rm {}
# Create an archive from the oldest folder, per $MAX_FOLDER_OLD.
tar jcf $(find . -name backup_* -type d -mtime +$MAX_FOLDER_OLD | sed 's/backup/archive/' | sed 's/\.\///').tar.bz2 $(find . -name backup_* -type d -mtime +$MAX_FOLDER_OLD)
# Move finished archives from the backup area into the archive folder.
find . -name archive_* -type f -exec mv {} $COMPRESS_FOLDER \;
fi
fi
)
# NOTE(review): "--exlude-from" is a typo ("--exclude-from"), and
# "$(LOCAL_FOLDER)" again executes instead of expanding the variable.
rsync -avx --timeout=30 --exlude-from=$EXCLUDE_FILTER $(LOCAL_FOLDER) -e ssh $SSH_NAME:$REMOTE_FOLDER/$BACKUP_FOLDER/$CURR_DAY
ssh $SSH_NAME "cd $REMOTE_FOLDER && find archive_* -type f -mtime +$MAX_ARCHIVE_OLD -exec rm {} \; && tar"
# Send a mail to report a successful backup.
# https://www.youtube.com/watch?v=A_jehHsTzvE
# Morrolinux's video on setting up the supporting mail server.
# NOTE(review): "[[ $MAIL==1 ]]" tests a non-empty string and is always true;
# it needs spaces: [[ $MAIL == 1 ]]. The ssh ( ... ) below has the same
# syntax problem flagged above.
if [[ $MAIL==1 ]]; then
ssh $SSH_NAME (
echo "Backup completato!" > $TESTO_MAIL
echo -n Spazio totale occupato: && echo -n -e ' \t'
du -sh $REMOTE_FOLDER/$BACKUP_FOLDER | tail -n 1 >> $TESTO_MAIL
mail -s "backup-completato" $MAIL $TESTO_MAIL
rm -f $TESTO_MAIL
)
fi
|
pazpi/ATLab-backup-rsync
|
baskup.sh
|
Shell
|
gpl-2.0
| 5,844 |
#!/bin/bash
# phyGLOS build recipe for check-0.12.0 (a unit-testing framework for C).

# Configure for /usr with shared libraries only, then build.
build_compile()
{
   ./configure        \
      --prefix=/usr   \
      --disable-static
   make
}

# Test level for this recipe — presumably consumed by the phyglos bundle
# driver to decide whether build_test runs; TODO confirm semantics.
build_test_level=3

# Run the package's own test suite.
build_test()
{
   make check
}

# Stage the install into $BUILD_PACK (set by the phyglos framework) for packaging.
build_pack()
{
   make DESTDIR=$BUILD_PACK \
        docdir=/usr/share/doc/check-0.12.0 \
        install
}
|
phyglos/phyglos
|
bundles/phyglos-core.dir/check-0.12.0.sh
|
Shell
|
gpl-2.0
| 271 |
#! /bin/bash
# Remote-side IPsec configuration for the ipsec-secgw transport-mode tests
# (ESP with AES-CBC encryption + HMAC-SHA1 auth). All commands run on
# ${REMOTE_HOST} over ssh; addresses and host come from the sourced defs.

. ${DIR}/trs_aescbc_sha1_common_defs.sh
SGW_CMD_XPRM='-w 300'

# Install IPv4 transport-mode policies and SAs on the remote host
# (out: reqid 1 / in: reqid 2, both SPI 7), then dump them for the log.
config_remote_xfrm()
{
	ssh ${REMOTE_HOST} ip xfrm policy flush
	ssh ${REMOTE_HOST} ip xfrm state flush

	ssh ${REMOTE_HOST} ip xfrm policy add \
src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \
dir out ptype main action allow \
tmpl proto esp mode transport reqid 1

	ssh ${REMOTE_HOST} ip xfrm policy add \
src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \
dir in ptype main action allow \
tmpl proto esp mode transport reqid 2

	ssh ${REMOTE_HOST} ip xfrm state add \
src ${REMOTE_IPV4} dst ${LOCAL_IPV4} \
proto esp spi 7 reqid 1 mode transport replay-window 64 \
auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \
enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef

	ssh ${REMOTE_HOST} ip xfrm state add \
src ${LOCAL_IPV4} dst ${REMOTE_IPV4} \
proto esp spi 7 reqid 2 mode transport replay-window 64 \
auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \
enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef

	ssh ${REMOTE_HOST} ip xfrm policy list
	ssh ${REMOTE_HOST} ip xfrm state list
}

# Same as config_remote_xfrm, plus the IPv6 policies and SAs
# (out: reqid 3 / in: reqid 4, both SPI 9).
config6_remote_xfrm()
{
	config_remote_xfrm
	ssh ${REMOTE_HOST} ip xfrm policy add \
src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \
dir out ptype main action allow \
tmpl proto esp mode transport reqid 3

	ssh ${REMOTE_HOST} ip xfrm policy add \
src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \
dir in ptype main action allow \
tmpl proto esp mode transport reqid 4

	ssh ${REMOTE_HOST} ip xfrm state add \
src ${REMOTE_IPV6} dst ${LOCAL_IPV6} \
proto esp spi 9 reqid 3 mode transport replay-window 64 \
auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \
enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef

	ssh ${REMOTE_HOST} ip xfrm state add \
src ${LOCAL_IPV6} dst ${REMOTE_IPV6} \
proto esp spi 9 reqid 4 mode transport replay-window 64 \
auth sha1 0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef \
enc aes 0xdeadbeefdeadbeefdeadbeefdeadbeef

	ssh ${REMOTE_HOST} ip xfrm policy list
	ssh ${REMOTE_HOST} ip xfrm state list
}
|
emmericp/dpdk
|
examples/ipsec-secgw/test/trs_aescbc_sha1_defs.sh
|
Shell
|
gpl-2.0
| 1,946 |
#!/bin/sh
#TODO: replace folder structure with variables declared in the beginning
#TODO: announcements
# Minecraft (Spigot) server control script. The server runs inside a GNU
# screen session; commands are injected with "screen -X stuff".
screensession=testserver
serverlocation=/data/testservers/wetfjordTest/serverMinecraft/
backuplocation=/data/backups/minecraft/testserver/
buildtoolslocation=/home/minecraft/buildtools/
javapath=/usr/java/oracle/jdk-17.0.1/bin/java
serverjar=spigot.jar
# backups older than this many days are removed by -delete
days=5
option="${1}"
#mem=${2:-1024}
case ${option} in
# Start a detached screen session and launch the server with $2 MB heap
# (default 1024).
-start) MEM="${2:-1024}"
screen -d -m -S "$screensession"
sleep 2
screen -R "$screensession" -X stuff "cd "$serverlocation"\n"
screen -R "$screensession" -X stuff ""$javapath" -Xms"$MEM"M -Xmx"$MEM"M -jar "$serverlocation""$serverjar" nogui\n"
;;
# Warn players, stop the server, refresh the jar from buildtools, relaunch.
-restart) MEM="${2:-1024}"
screen -R "$screensession" -X stuff "say server will reboot in 30 seconds. Back in 1 minute $(printf '\r')"
sleep 10
screen -R "$screensession" -X stuff "say server will reboot in 20 seconds. Back in 1 minute $(printf '\r')"
sleep 10
screen -R "$screensession" -X stuff "say server will reboot in 10 seconds. Back in 1 minute $(printf '\r')"
sleep 5
screen -R "$screensession" -X stuff "say server will reboot in 5 seconds. Back in 1 minute $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "say server will reboot in 4 seconds. Back in 1 minute $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "say server will reboot in 3 seconds. Back in 1 minute $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "say server will reboot in 2 seconds. Back in 1 minute $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "say server will reboot in 1 second. Back in 1 minute $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "stop $(printf '\r')"
sleep 20
cp "$buildtoolslocation""$serverjar" "$serverlocation""$serverjar"
screen -R "$screensession" -X stuff ""$javapath" -Xms"$MEM"M -Xmx"$MEM"M -jar "$serverlocation""$serverjar" nogui\n"
;;
# Pause world saves, tar the server directory, resume saves.
-backup)
screen -R "$screensession" -X stuff "say Backup starting. You may experience a little lag$(printf '\r')"
screen -R "$screensession" -X stuff "save-off $(printf '\r')"
screen -R "$screensession" -X stuff "save-all $(printf '\r')"
sleep 3
rm -r /data/backups/latest/testserver/*
date=$(date +%y%m%d-%H%M%S)
tar -cpvzf "$backuplocation""$screensession-""$date".tar.gz "$serverlocation"
cp "$backuplocation""$screensession-""$date".tar.gz /data/backups/latest/testserver/
screen -R "$screensession" -X stuff "save-on $(printf '\r')"
screen -R "$screensession" -X stuff "save-all $(printf '\r')"
sleep 3
screen -R "$screensession" -X stuff "say Backup completed. $(printf '\r')"
;;
# Warn players with a countdown, stop the server, then close the screen session.
-stop)
screen -R "$screensession" -X stuff "say The server will shut down in 30 seconds. For more information check facebook/twitter/discord/our website $(printf '\r')"
sleep 10
screen -R "$screensession" -X stuff "say The server will shut down in 20 seconds. For more information check facebook/twitter/discord/our website $(printf '\r')"
sleep 10
screen -R "$screensession" -X stuff "say The server will shut down in 10 seconds. For more information check facebook/twitter/discord/our website $(printf '\r')"
sleep 10
screen -R "$screensession" -X stuff "say The server will shut down in 5 seconds. For more information check facebook/twitter/discord/our website $(printf '\r')"
sleep 5
screen -R "$screensession" -X stuff "say The server will shut down in 4 seconds. For more information check facebook/twitter/discord/our website $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "say The server will shut down in 3 seconds. For more information check facebook/twitter/discord/our website $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "say The server will shut down in 2 seconds. For more information check facebook/twitter/discord/our website $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "say The server will shut down in 1 second. For more information check facebook/twitter/discord/our website $(printf '\r')"
sleep 1
screen -R "$screensession" -X stuff "stop $(printf '\r')"
sleep 10
# FIX: was lowercase "-x" (attach to a running session), which never sends
# the command; "-X" is required to deliver "exit" and end the session.
screen -R "$screensession" -X stuff 'exit\n'
;;
# Delete backups older than $days days.
-delete)
find "$backuplocation"* -mindepth 1 -mtime +"$days" -delete
;;
# Reload the whitelist in the running server.
-reload)
screen -R "$screensession" -X stuff "whitelist reload $(printf '\r')"
;;
# Fetch Spigot BuildTools and build the jar for revision $2.
-update) REVISION="${2}"
cd "$buildtoolslocation"
curl "https://hub.spigotmc.org/jenkins/job/BuildTools/lastSuccessfulBuild/artifact/target/BuildTools.jar" -o BuildTools.jar
"$javapath" -jar BuildTools.jar --rev "$REVISION"
mv "$buildtoolslocation"spigot-"$REVISION".jar "$buildtoolslocation""$serverjar"
;;
-announcement1)
screen -R "$screensession" -X stuff "say §2Announcement: Our new wiki will be the central hub of information @ wetfjord. Contribute! www.wetfjord.eu/wiki $(printf '\r')"
;;
-announcement2)
screen -R "$screensession" -X stuff "say §2Announcement: Join our discord (skype group replacement)!: https://discord.gg/QH2WfWw $(printf '\r')"
;;
*)
echo "`basename ${0}`:usage: [-start memory in mb] | [-restart memory in mb] | [-backup] | [-stop] | [-reload] | [-announcement]"
exit 1 # Command to come out of the program with status 1
;;
esac
|
Trustmega/Wetfjord-Universe
|
wetfjord.sh
|
Shell
|
gpl-3.0
| 5,294 |
#!/bin/sh
# One-shot provisioning script for an Ubuntu 12.04 (64-bit) dev machine:
# adds apt aliases to ~/.bashrc, installs build/dev tooling, enables a set
# of PPAs plus the Spotify repo, installs desktop apps, and sets git config.
# Append apt shortcut aliases to the user's .bashrc (multi-line string).
echo "# my aliases
alias aptu='sudo apt-get update && sudo apt-get dist-upgrade -y'
alias aptc='sudo apt-get autoclean && sudo apt-get clean && sudo apt-get autoremove -y'
alias apti='sudo apt-get install '" >> ~/.bashrc
sudo apt-get update && sudo apt-get dist-upgrade -y
sudo apt-get install build-essential autoconf automake autotools-dev dh-make debhelper devscripts fakeroot xutils lintian \
pbuilder linux-generic git wget curl lftp htop vim gksu dos2unix ruby ruby-dev rubygems \
python-dev python-setuptools python-pip python-apt dkms gftp filezilla mc git-buildpackage \
nfs-common nfs-kernel-server rpcbind asciidoc
sudo add-apt-repository -y ppa:libreoffice/ppa
sudo add-apt-repository -y ppa:maarten-baert/simplescreenrecorder
sudo add-apt-repository -y ppa:shutter/ppa
sudo add-apt-repository -y ppa:otto-kesselgulasch/gimp
sudo add-apt-repository -y "deb http://repository.spotify.com stable non-free"
# Spotify repository signing key.
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 94558F59
sudo apt-get update && sudo apt-get install skype ubuntu-restricted-extras libreoffice simplescreenrecorder simplescreenrecorder-lib:i386 shutter gimp spotify-client
sudo apt-get autoclean && sudo apt-get clean && sudo apt-get autoremove -y
git config --global user.name "Ryan P.C. McQuen"
git config --global user.email "[email protected]"
git config --global credential.helper cache
# sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys BB901940
# sudo apt-key adv --recv-keys --keyserver keyserver.ubuntu.com 614C4B38
# wget -q https://dl-ssl.google.com/linux/linux_signing_key.pub -O- | sudo apt-key add -
# sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 1378B444
# sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B9BA26FA
# wget -q "http://deb.playonlinux.com/public.gpg" -O- | sudo apt-key add -
# sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 4E9CFF4E
# wget -q http://download.virtualbox.org/virtualbox/debian/oracle_vbox.asc -O- | sudo apt-key add -
# sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F9CB8DB0
# Download installers that are not in the repos.
cd ~/Downloads
#wget https://www.dropbox.com/download?dl=packages/ubuntu/dropbox_*_amd64.deb
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
#wget http://c758482.r82.cf2.rackcdn.com/sublime-text_build-*_amd64.deb
## sed -i "/^# deb .*partner/ s/^# //" /etc/apt/sources.list && apt-get update
|
paolme/linuxTweaks
|
ubuntu/ubuntu1204Dev64Restore.sh
|
Shell
|
gpl-3.0
| 2,424 |
# postexecute/GNU/Linux/60_save_knife_node_output.sh
# see also https://github.com/gdha/upgrade-ux/issues/100
# Sourced post-execute hook: save "knife node show" output after patching so
# it can be diffed against the pre-patch snapshot. Does nothing unless knife
# is installed and a chef client config exists; in PREVIEW mode it only logs.
if type -p knife >/dev/null 2>&1 ; then
	[[ ! -f /etc/chef/client.rb ]] && return # we need this file to continue
	if (( PREVIEW )) ; then
		Log "knife node show (after patching) [not in preview mode]"
		return
	fi
	# node_name is extracted from the double-quoted value in client.rb
	knife node show $( grep node_name /etc/chef/client.rb | cut -d\" -f 2 ) -l -c /etc/chef/client.rb > "$VAR_DIR/$DS/knife_node_show.after"
fi
|
gdha/upgrade-ux
|
opt/upgrade-ux/scripts/postexecute/GNU/Linux/60_save_knife_node_output.sh
|
Shell
|
gpl-3.0
| 493 |
#!/bin/bash
# Upgrade the Debian host first, then all containers, with labelled output.
echo
echo "<<< HOST >>>"
echo
/root/es_scripts/upgrade_debian.sh
echo
echo "<<< CONTAINERS >>>"
echo
/root/es_scripts/upgrade_container.sh
|
emrahcom/emrah-stretch
|
host/root/es_scripts/upgrade_all.sh
|
Shell
|
gpl-3.0
| 153 |
#!/bin/bash
# Copyright 2014 Cumulus Networks, Inc.  All rights reserved.
# Author: Stanley Karunditu [email protected]
# Change the hostname without reboot.
#  * Script is interactive
# Applies only to debian based OS
# Example:
# ./change_hostname.sh mynewswitch
#
# Print a message to stderr and abort.
die () {
    echo >&2 "$@"
    exit 1
}

[ "$#" -eq 1 ] || die "new hostname is required"

# Help text
if [ "$1" == '-h' ]; then
	echo "Usage: `basename $0` [newhostname]
Change hostname without a reboot"
	exit 0
fi

OLD_HOSTNAME=`/bin/hostname`
NEW_HOSTNAME=$1

# Yes/no, yes/no, yes/no.. :)
# (-n 2 returns after at most two characters; a lone "y"+Enter also works)
read -p "Change Hostname from '${OLD_HOSTNAME}' to '${NEW_HOSTNAME}'
ARE YOU SURE? [y/N] " -n 2  -r
if [[ $REPLY =~ ^[yY]$ ]]
then
	# change runtime hostname
	hostname $1
	# persist it for the next boot, keeping a backup of the old file
	cp /etc/hostname /etc/hostname.bak
	echo $NEW_HOSTNAME > /etc/hostname

	# change hostname in host table. Looks like a Debian thing.
	# http://www.debian.org/doc/manuals/debian-reference/ch05.en.html#_the_hostname_resolution
	cp /etc/hosts /etc/hosts.bak
	sed -i "/127\.0\.1\.1/ s/${OLD_HOSTNAME}/${NEW_HOSTNAME}/" /etc/hosts
	echo \
"Hostname changed to '${NEW_HOSTNAME}'
If the hostname is present in the bash prompt.
Log out and log back in"
fi
|
CumulusNetworks/customer-scripts
|
change_hostname.sh
|
Shell
|
gpl-3.0
| 1,202 |
#!/bin/sh
# Wrapper that runs the qa_stream_mux Python test against the in-tree build:
# it points PATH/LD_LIBRARY_PATH/PYTHONPATH at the build directories before
# invoking the test. Appears to be generated by the build system — confirm
# before editing by hand.
export GR_DONT_LOAD_PREFS=1
export srcdir=/home/katsikas/gnuradio/gnuradio-core/src/python/gnuradio/gr
export PATH=/home/katsikas/gnuradio/build/gnuradio-core/src/python/gnuradio/gr:$PATH
export LD_LIBRARY_PATH=/home/katsikas/gnuradio/build/volk/lib:/home/katsikas/gnuradio/build/gruel/src/lib:/home/katsikas/gnuradio/build/gnuradio-core/src/lib:$LD_LIBRARY_PATH
export PYTHONPATH=/home/katsikas/gnuradio/build/gnuradio-core/src/python:/home/katsikas/gnuradio/build/gnuradio-core/src/lib/swig:$PYTHONPATH
/usr/bin/python -B /home/katsikas/gnuradio/gnuradio-core/src/python/gnuradio/gr/qa_stream_mux.py
|
katsikas/gnuradio
|
build/gnuradio-core/src/python/gnuradio/gr/qa_stream_mux_test.sh
|
Shell
|
gpl-3.0
| 613 |
#!/bin/sh
# Assemble hello.asm for the PIC12LF1822 with gputils' gpasm,
# producing hello.hex (and listing files) in the current directory.
gpasm -p pic12lf1822 hello.asm
|
mrtee/pickit2-f1xxx
|
test/make_hex.sh
|
Shell
|
gpl-3.0
| 41 |
#!/bin/bash
# Author: Claudio Di Ciccio
# Version: 0.8
# Date: 2013/09/20
# Description: This script launches the MinerFulSimuStarter to test what happens when making the alphabet size grow, slowly.
# (Cleanup pass: legacy backtick substitutions replaced with $(...),
#  `expr` counters replaced with shell arithmetic, redirect targets quoted.
#  Logic, parameters and output format are unchanged.)

## Import the shell functions to create Regular Expressions expressing constraints
. ./constraintsFunctions.cfg
clear

# Java entry point and general test parameters.
MAINCLASS="minerful.MinerFulSimuStarter"
DEBUGLEVEL='none'
THRESHOLD=0.0
MAX_THRESHOLD=1.0
#MAX_TEST_REPEATS=10
MAX_TEST_REPEATS=24
MEMORY_MAX="2048m"
MIN_TESTBED_SIZE=1
#MAX_TESTBED_SIZE=2
#MAX_TESTBED_SIZE=1000
ULTRAMAX_TESTBED_SIZE=10000
MAX_TESTBED_SIZE=$ULTRAMAX_TESTBED_SIZE
# Output locations: one CSV per constraint.
OUTPUT_DIR="MINERful-accuracyTest"
OUTPUT_FILE_TEMPLATE="%s-accuracyTest.csv"
SUBSUMPTIONS_FILE_TEMPLATE="%s-subsumptions.csv"
FILENAME_TEMPLATE="$OUTPUT_DIR/$OUTPUT_FILE_TEMPLATE"
FILENAME_FOR_SUBSUMPTIONS_TEMPLATE="$OUTPUT_DIR/$SUBSUMPTIONS_FILE_TEMPLATE"
LEGEND="'Test ID';'Test Number';'Generating constraint';'Number of traces';'Minimum trace size';'Maximum trace size';'Avg. events per trace';'Events read';'Process alphabet size';'Discovered Constraint';'Support';'Confidence Level';'Interest Factor'"

# Process alphabet: both colon-separated ("a:b:...") and condensed ("ab...") forms.
alphabetCharacters=("a" "b" "c" "d" "e" "f" "g" "h")
alphabetCondensed=${alphabetCharacters[@]}
alphabet=$(echo $alphabetCondensed | sed 's/ /:/g')
alphabetCondensed=$(echo $alphabetCondensed | sed 's/ //g')

# Three (min,max) trace-length ranges, scaled by the alphabet size.
stringlenthrangemin[0]=$(( 0 * ${#alphabetCharacters[*]} ))
stringlenthrangemax[0]=$(( 1 * 2 * ${#alphabetCharacters[*]} ))
stringlenthrangemin[1]=$(( 1 * ${#alphabetCharacters[*]} ))
stringlenthrangemax[1]=$(( 1 * 2 * 4 * ${#alphabetCharacters[*]} ))
stringlenthrangemin[2]=$(( 1 * 2 * ${#alphabetCharacters[*]} ))
stringlenthrangemax[2]=$(( 1 * 2 * 4 * 8 * ${#alphabetCharacters[*]} ))

# Regular expressions for each generating constraint, built by the
# functions sourced from constraintsFunctions.cfg.
constraints=(
 $(Participation a)        #00
 $(AtMostOne a)            #01
 $(Init a)                 #02
 $(End a)                  #03
 $(RespondedExistence a b) #04
 $(Response a b)           #05
 $(AlternateResponse a b)  #06
 $(ChainResponse a b)      #07
 $(Precedence a b)         #08
 $(AlternatePrecedence a b) #09
 $(ChainPrecedence a b)    #10
 $(CoExistence a b)        #11
 $(Succession a b)         #12
 $(AlternateSuccession a b) #13
 $(ChainSuccession a b)    #14
 $(NotChainSuccession a b) #15
 $(NotSuccession a b)      #16
 $(NotCoExistence a b)     #17
)
# File-name-safe labels, parallel to $constraints.
constraintNames=(
 "Participation_a"          #00
 "AtMostOne_a"              #01
 "Init_a"                   #02
 "End_a"                    #03
 "RespondedExistence_a__b"  #04
 "Response_a__b"            #05
 "AlternateResponse_a__b"   #06
 "ChainResponse_a__b"       #07
 "Precedence_a__b"          #08
 "AlternatePrecedence_a__b" #09
 "ChainPrecedence_a__b"     #10
 "CoExistence_a__b"         #11
 "Succession_a__b"          #12
 "AlternateSuccession_a__b" #13
 "ChainSuccession_a__b"     #14
 "NotChainSuccession_a__b"  #15
 "NotSuccession_a__b"       #16
 "NotCoExistence_a__b"      #17
)
# Human-readable labels used inside the CSV rows, parallel to $constraints.
constraintSerifNames=(
 "'Participation(a)'"          #00
 "'AtMostOne(a)'"              #01
 "'Init(a)'"                   #02
 "'End(a)'"                    #03
 "'RespondedExistence(a, b)'"  #04
 "'Response(a, b)'"            #05
 "'AlternateResponse(a, b)'"   #06
 "'ChainResponse(a, b)'"       #07
 "'Precedence(a, b)'"          #08
 "'AlternatePrecedence(a, b)'" #09
 "'ChainPrecedence(a, b)'"     #10
 "'CoExistence(a, b)'"         #11
 "'Succession(a, b)'"          #12
 "'AlternateSuccession(a, b)'" #13
 "'ChainSuccession(a, b)'"     #14
 "'NotChainSuccession(a, b)'"  #15
 "'NotSuccession(a, b)'"       #16
 "'NotCoExistence(a, b)'"      #17
)

## The game begins!
## Create the subdirectory to store the output files
if [ ! -d "$OUTPUT_DIR" ]; then
 mkdir "$OUTPUT_DIR"
fi

## For each constraint
#for (( constraintIdx=0; constraintIdx<${#constraints[*]}; constraintIdx++ ))
# do
### Empirically find and store the graph of subsumptions, referred to the current constraint
# outputString=`java -Xmx$MEMORY_MAX -classpath $LIBS $MAINCLASS -mR -d $DEBUGLEVEL -t $MAX_THRESHOLD -a $alphabet -m ${stringlenthrangemin[1]} -M ${stringlenthrangemax[2]} -s $ULTRAMAX_TESTBED_SIZE -r ${constraints[constraintIdx]}`
# echo `echo "$outputString" | grep -n 'Legend' | sed 's/^.*Legend[^:]*: //g'` > `printf "$FILENAME_FOR_SUBSUMPTIONS_TEMPLATE" "${constraintNames[constraintIdx]}"`
# echo "Successfully stored the net of subsumptions for ${constraintNames[constraintIdx]}"
#done
## Temporary variables
# headerString=`java -Xmx$MEMORY_MAX -classpath $LIBS $MAINCLASS -mR -d none -t 0.0 -a $alphabet -m ${#alphabetCharacters[*]} -M ${#alphabetCharacters[*]} -s 1 -r $alphabetCondensed`
# headerString=`echo "$headerString" | grep -n 'Legend' | sed 's/^.*Legend[^:]*: //g' | sed "s/'//g"`
# headerString=`echo "$LEGEND_INIT$headerString"`
filename=''
tid=0  # test id: one per (constraint, range, testbed size) combination
q=0    # global run counter across repeats

## For each constraint
for (( constraintIdx=0; constraintIdx<${#constraints[*]}; constraintIdx++ ))
 do
  filename=$(printf "$FILENAME_TEMPLATE" "${constraintNames[constraintIdx]}")
  echo "$LEGEND" > "$filename"
  ## For each combo of minimum string legth...
  for (( min=0; min<${#stringlenthrangemin[*]}; min++ ))
   do
    ## ... and max length
    for (( max=0; max<${#stringlenthrangemax[*]}; max++ ))
     do
      ## For each possible length of a string, from $MIN_TESTBED_SIZE to $MAX_TESTBED_SIZE
      for (( num=$MIN_TESTBED_SIZE; num<=$MAX_TESTBED_SIZE; num++ ))
       do
        tid=$(( tid + 1 ))
        for (( i=1; i<=$MAX_TEST_REPEATS; i++ ))
         do
          q=$(( q + 1 ))
          # Run the simulator; abort the whole campaign on any failure.
          outputString=$(java -Xmx$MEMORY_MAX -jar MINERful.jar $MAINCLASS -CSV -d $DEBUGLEVEL -t $THRESHOLD -a $alphabet -m ${stringlenthrangemin[min]} -M ${stringlenthrangemax[max]} -s $num -r ${constraints[constraintIdx]})
          if [ $? -ne 0 ]
           then
            exit 1
          fi
          # Prefix each CSV row with test id, run number, constraint name
          # and the computation-stats fields scraped from the log.
          setupString=$(echo "$outputString" | grep 'printComputationStats' | sed 's/.*- \(\([0-9\.]*;\)\{6\}\).*/\1/g')
          outputString=$(echo "$outputString" | grep "^'" | sed "s/^'/$tid;$q;${constraintSerifNames[constraintIdx]};$setupString'/g")
          echo "$outputString" >> "$filename"
        done
        echo "$(($MAX_TESTBED_SIZE - $MIN_TESTBED_SIZE +1)) tests for ${constraintSerifNames[constraintIdx]} on [ ${stringlenthrangemin[min]} - ${stringlenthrangemax[max]} ] long strings done, on a testbed comprising $num strings"
      done
      echo "Tests for ${constraintSerifNames[constraintIdx]} on [ ${stringlenthrangemin[min]} - ${stringlenthrangemax[max]} ] long strings done"
    done
  done
done
echo "$q tests"
exit 0
|
cdc08x/MINERful
|
test-launchers/test-MINERful-slowlyGrowingLog.sh
|
Shell
|
gpl-3.0
| 6,718 |
#!/bin/sh
# Generate groff manpage from markdown
# (posts the markdown source to the mantastic conversion service).
src="docs/textsuggest.1.md"
service="http://mantastic.herokuapp.com"
curl -F "page=@${src}" "$service" 2>/dev/null
|
maateen/TextSuggestBangla
|
docs/gen_manpage_from_markdown.sh
|
Shell
|
gpl-3.0
| 130 |
#!/bin/bash
# Build and publish the base LXD container used by package_check.
# Relies on environment variables set by the caller: LXC_BASE, DIST, ARCH,
# YNH_BRANCH, DOMAIN, SUBDOMAIN, YUNO_PWD, TEST_USER, and on the helpers
# get_arch / check_lxd_setup defined elsewhere in the project.

# Create and start a fresh Debian container named $LXC_BASE.
# When $ARCH differs from the host architecture, re-tag the image's
# metadata so LXD accepts it (emulated-arch build).
function launch_new_lxc()
{
# drop any leftover container with the same name
lxc info $LXC_BASE >/dev/null && lxc delete $LXC_BASE --force
if [ $(get_arch) = $ARCH ];
then
lxc launch images:debian/$DIST/$ARCH $LXC_BASE -c security.privileged=true -c security.nesting=true
else
# foreign arch: export the image, patch "architecture:" in metadata.yaml
# to the host arch, then re-import it under the $LXC_BASE alias
lxc image info $LXC_BASE >/dev/null && lxc image delete $LXC_BASE
tmp_dir=$(mktemp -d)
pushd $tmp_dir
lxc image export images:debian/$DIST/$ARCH
tar xJf lxd.tar.xz
local current_arch=$(get_arch)
sed -i "0,/architecture: $ARCH/s//architecture: $current_arch/" metadata.yaml
tar cJf lxd.tar.xz metadata.yaml templates
lxc image import lxd.tar.xz rootfs.squashfs --alias $LXC_BASE
popd
rm -rf "$tmp_dir"
lxc launch $LXC_BASE $LXC_BASE -c security.privileged=true -c security.nesting=true
fi
}

# Install YunoHost inside the container, run postinstall, create the test
# domain/user, then publish the container as a public image named $LXC_BASE.
function rebuild_base_lxc()
{
check_lxd_setup
launch_new_lxc
sleep 5
IN_LXC="lxc exec $LXC_BASE --"
INSTALL_SCRIPT="https://install.yunohost.org/$DIST"
$IN_LXC apt install curl -y
$IN_LXC /bin/bash -c "curl $INSTALL_SCRIPT | bash -s -- -a -d $YNH_BRANCH"
# neutralise apt's periodic jobs so they cannot interfere with tests
$IN_LXC systemctl -q stop apt-daily.timer
$IN_LXC systemctl -q stop apt-daily-upgrade.timer
$IN_LXC systemctl -q stop apt-daily.service
$IN_LXC systemctl -q stop apt-daily-upgrade.service
$IN_LXC systemctl -q disable apt-daily.timer
$IN_LXC systemctl -q disable apt-daily-upgrade.timer
$IN_LXC systemctl -q disable apt-daily.service
$IN_LXC systemctl -q disable apt-daily-upgrade.service
$IN_LXC rm -f /etc/cron.daily/apt-compat
$IN_LXC cp /bin/true /usr/lib/apt/apt.systemd.daily
# Disable password strength check
$IN_LXC yunohost tools postinstall --domain $DOMAIN --password $YUNO_PWD --force-password
$IN_LXC yunohost settings set security.password.admin.strength -v -1
$IN_LXC yunohost settings set security.password.user.strength -v -1
$IN_LXC yunohost domain add $SUBDOMAIN
TEST_USER_DISPLAY=${TEST_USER//"_"/""}
$IN_LXC yunohost user create $TEST_USER --firstname $TEST_USER_DISPLAY --mail $TEST_USER@$DOMAIN --lastname $TEST_USER_DISPLAY --password "$YUNO_PWD"
$IN_LXC yunohost --version
# freeze the configured container into a (public) reusable image
lxc stop $LXC_BASE
lxc image delete $LXC_BASE
lxc publish $LXC_BASE --alias $LXC_BASE --public
lxc delete $LXC_BASE
}
|
YunoHost/package_check
|
lib/build_base_lxc.sh
|
Shell
|
gpl-3.0
| 2,369 |
# Install the Python test dependencies required by the upgrade scenarios.
function setupRequirement () {
pip install -U -r requirements.txt docker-py pytest-xdist==1.27.0 sauceclient
pip install -r requirements-optional.txt
}
# Sourcing and exporting required env vars and setting up robottelo properties
function setupPrerequisites () {
source "${CONFIG_FILES}"
source config/compute_resources.conf
source config/sat6_upgrade.conf
source config/sat6_repos_urls.conf
source config/subscription_config.conf
source config/fake_manifest.conf
source config/installation_environment.conf
# fill in the robottelo property templates from the sourced env vars
cp config/robottelo.properties ./robottelo.properties
cp config/robottelo.yaml ./robottelo.yaml
sed -i "s/{server_hostname}/${RHEV_SAT_HOST}/" robottelo.properties
sed -i "s/# rhev_cap_host=.*/rhev_cap_host=${RHEV_CAP_HOST}/" robottelo.properties
sed -i "s/# rhev_capsule_ak=.*/rhev_capsule_ak=${RHEV_CAPSULE_AK}/" robottelo.properties
sed -i "s/# from_version=.*/from_version=${FROM_VERSION}/" robottelo.properties
sed -i "s/# to_version=.*/to_version=${TO_VERSION}/" robottelo.properties
sed -i "s/^# \[vlan_networking\].*/[vlan_networking]/" robottelo.properties
sed -i "s/# bridge=.*/bridge=${BRIDGE}/" robottelo.properties
sed -i "s/# subnet=.*/subnet=${SUBNET}/" robottelo.properties
sed -i "s/# gateway=.*/gateway=${GATEWAY}/" robottelo.properties
sed -i "s/# netmask=.*/netmask=${NETMASK}/" robottelo.properties
sed -i "s|sattools_repo=.*|sattools_repo=rhel8=${RHEL8_TOOLS_REPO},rhel7=${RHEL7_TOOLS_REPO},rhel6=${RHEL6_TOOLS_REPO}|" robottelo.properties
# Robottelo logging configuration
sed -i "s/'\(robottelo\).log'/'\1-${ENDPOINT}.log'/" logging.conf
# Bugzilla Login Details
sed -i "/^\[bugzilla\]/,/^\[/s/^#\?api_key=\w*/api_key=${BUGZILLA_KEY}/" robottelo.properties
# install the fake manifest certificate on the Satellite and restart tomcat
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@"${SERVER_HOSTNAME}" -C "wget -O /etc/candlepin/certs/upstream/fake_manifest.crt $FAKE_MANIFEST_CERT_URL;systemctl restart tomcat"
}
# Pre-Upgrade specific required updates to environment
function setupPreUpgrade () {
# Installing nailgun according to FROM_VERSION
sed -i "s/nailgun.git.*/nailgun.git@${FROM_VERSION}.z#egg=nailgun/" requirements.txt
# Setting the SATELLITE_VERSION to FROM_VERSION for sourcing correct environment variables
export SATELLITE_VERSION="${FROM_VERSION}"
}
# Run the pre- or post-upgrade pytest scenarios depending on $ENDPOINT.
# Failures are tolerated here (set +e) so the junit XML is always produced.
set +e
# Run pre-upgarde scenarios tests
if [ ${ENDPOINT} == 'pre-upgrade' ]; then
setupPreUpgrade
setupRequirement
setupPrerequisites
$(which py.test) -v --continue-on-collection-errors -s -m pre_upgrade --junit-xml=test_scenarios-pre-results.xml -o junit_suite_name=test_scenarios-pre tests/upgrades
else
setupRequirement
setupPrerequisites
$(which py.test) -v --continue-on-collection-errors -s -m post_upgrade --junit-xml=test_scenarios-post-results.xml -o junit_suite_name=test_scenarios-post tests/upgrades
# Delete the Original Manifest from the box to run robottelo tests
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@"${SERVER_HOSTNAME}" "hammer -u admin -p changeme subscription delete-manifest --organization 'Default Organization'"
fi
set -e
echo
echo "========================================"
echo "Server information"
echo "========================================"
echo "Hostname: $SERVER_HOSTNAME"
echo "Credentials: admin/changeme"
echo "========================================"
echo
echo "========================================"
lpramuk/robottelo-ci
|
scripts/satellite6-upgrade-run-scenarios.sh
|
Shell
|
gpl-3.0
| 3,487 |
#!/bin/sh
# Blackbox test driver for ldbsearch against a Samba LDAP/CLDAP server.
# Usage: test_ldb.sh PROTOCOL SERVER [OPTIONS]
if [ $# -lt 2 ]; then
cat <<EOF
Usage: test_ldb.sh PROTOCOL SERVER [OPTIONS]
EOF
exit 1;
fi
p=$1
SERVER=$2
PREFIX=$3
# NOTE(review): only 2 args are shifted, so $options still starts with
# $PREFIX ($3) — presumably intentional; confirm against callers.
shift 2
options="$*"
# subunit.sh provides the subunit reporting helpers used below.
. `dirname $0`/subunit.sh
# check NAME CMD [ARGS...] -- run CMD with its arguments, emitting
# subunit-style "test:/success:/failure:" lines for NAME.
# Increments the global $failed counter on a non-zero exit and returns
# the command's exit status.
# (Fix: run "$@" directly instead of re-splitting a flattened "$*"
#  string, which broke arguments containing whitespace/globs; replaced
#  the legacy `expr` counter with shell arithmetic.)
check() {
	name="$1"
	shift
	echo "test: $name"
	"$@"
	status=$?
	if [ "$status" -eq 0 ]; then
		echo "success: $name"
	else
		echo "failure: $name"
		failed=$((failed + 1))
	fi
	return $status
}
# Run the ldbsearch checks proper. $failed accumulates the number of
# failing assertions; the script exits with it at the end.
export PATH="$BINDIR:$PATH"
ldbsearch="$VALGRIND ldbsearch"
check "RootDSE" $ldbsearch $CONFIGURATION $options --basedn='' -H $p://$SERVER -s base DUMMY=x dnsHostName highestCommittedUSN || failed=`expr $failed + 1`
echo "Getting defaultNamingContext"
BASEDN=`$ldbsearch $CONFIGURATION $options --basedn='' -H $p://$SERVER -s base DUMMY=x defaultNamingContext | grep defaultNamingContext | awk '{print $2}'`
echo "BASEDN is $BASEDN"
check "Listing Users" $ldbsearch $options $CONFIGURATION -H $p://$SERVER '(objectclass=user)' sAMAccountName || failed=`expr $failed + 1`
check "Listing Users (sorted)" $ldbsearch -S $options $CONFIGURATION -H $p://$SERVER '(objectclass=user)' sAMAccountName || failed=`expr $failed + 1`
check "Listing Groups" $ldbsearch $options $CONFIGURATION -H $p://$SERVER '(objectclass=group)' sAMAccountName || failed=`expr $failed + 1`
# count users+groups via the bitwise-matching rule filters
nentries=`$ldbsearch $options -H $p://$SERVER $CONFIGURATION '(|(|(&(!(groupType:1.2.840.113556.1.4.803:=1))(groupType:1.2.840.113556.1.4.803:=2147483648)(groupType:1.2.840.113556.1.4.804:=10))(samAccountType=805306368))(samAccountType=805306369))' sAMAccountName | grep sAMAccountName | wc -l`
echo "Found $nentries entries"
if [ $nentries -lt 10 ]; then
echo "Should have found at least 10 entries"
failed=`expr $failed + 1`
fi
echo "Check rootDSE for Controls"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER -s base -b "" '(objectclass=*)' | grep -i supportedControl | wc -l`
if [ $nentries -lt 4 ]; then
echo "Should have found at least 4 entries"
failed=`expr $failed + 1`
fi
# Exercise the individual LDAP controls; each test only requires a
# non-empty result set.
echo "Test Paged Results Control"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=paged_results:1:5 '(objectclass=user)' | grep sAMAccountName | wc -l`
if [ $nentries -lt 1 ]; then
echo "Paged Results Control test returned 0 items"
failed=`expr $failed + 1`
fi
echo "Test Server Sort Control"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=server_sort:1:0:sAMAccountName '(objectclass=user)' | grep sAMAccountName | wc -l`
if [ $nentries -lt 1 ]; then
echo "Server Sort Control test returned 0 items"
failed=`expr $failed + 1`
fi
echo "Test Extended DN Control"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=extended_dn:1 '(objectclass=user)' | grep sAMAccountName | wc -l`
if [ $nentries -lt 1 ]; then
echo "Extended DN Control test returned 0 items"
failed=`expr $failed + 1`
fi
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=extended_dn:1:0 '(objectclass=user)' | grep sAMAccountName | wc -l`
if [ $nentries -lt 1 ]; then
echo "Extended DN Control test returned 0 items"
failed=`expr $failed + 1`
fi
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=extended_dn:1:1 '(objectclass=user)' | grep sAMAccountName | wc -l`
if [ $nentries -lt 1 ]; then
echo "Extended DN Control test returned 0 items"
failed=`expr $failed + 1`
fi
echo "Test Domain scope Control"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=domain_scope:1 '(objectclass=user)' | grep sAMAccountName | wc -l`
if [ $nentries -lt 1 ]; then
echo "Extended Domain scope Control test returned 0 items"
failed=`expr $failed + 1`
fi
echo "Test Attribute Scope Query Control"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=asq:1:member -s base -b "CN=Administrators,CN=Builtin,$BASEDN" | grep sAMAccountName | wc -l`
if [ $nentries -lt 1 ]; then
echo "Attribute Scope Query test returned 0 items"
failed=`expr $failed + 1`
fi
echo "Test Search Options Control"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=search_options:1:2 '(objectclass=crossRef)' | grep crossRef | wc -l`
if [ $nentries -lt 1 ]; then
echo "Search Options Control Query test returned 0 items"
failed=`expr $failed + 1`
fi
echo "Test Search Options Control with Domain Scope Control"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER --controls=search_options:1:2,domain_scope:1 '(objectclass=crossRef)' | grep crossRef | wc -l`
if [ $nentries -lt 1 ]; then
echo "Search Options Control Query test returned 0 items"
failed=`expr $failed + 1`
fi
# wellknown_object_test GUID OBJECT -- resolve a well-known-GUID base DN
# (<WKGUID=...,BASEDN>, in both upper- and mixed-case spellings) and
# verify that the returned DN contains the substring OBJECT.
# Returns the number of failed lookups (0 on success).
# NOTE(review): only $1 and $2 are used; any further arguments are ignored.
wellknown_object_test() {
local guid=$1
local object=$2
local basedns
local dn
local r
local c
local n
local failed=0
basedns="<WKGUID=${guid},${BASEDN}> <wkGuId=${guid},${BASEDN}>"
for dn in ${basedns}; do
echo "Test ${dn} => ${object}"
r=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER '(objectClass=*)' -b "${dn}" | grep 'dn: '`
n=`echo "${r}" | grep 'dn: ' | wc -l`
c=`echo "${r}" | grep "${object}" | wc -l`
if [ $n -lt 1 ]; then
echo "Object not found by WKGUID"
failed=`expr $failed + 1`
continue
fi
if [ $c -lt 1 ]; then
echo "Wrong object found by WKGUID: [${r}]"
failed=`expr $failed + 1`
continue
fi
done
return $failed
}
# Well-known GUID lookups for the standard containers.
# (Fix: "Domain Controllers" is now passed as ONE quoted argument —
#  previously the unquoted word "Controllers" was silently dropped by
#  wellknown_object_test, which only reads $1 and $2.)
wellknown_object_test 22B70C67D56E4EFB91E9300FCA3DC1AA ForeignSecurityPrincipals
st=$?
if [ x"$st" != x"0" ]; then
failed=`expr $failed + $st`
fi
wellknown_object_test 2FBAC1870ADE11D297C400C04FD8D5CD Infrastructure
st=$?
if [ x"$st" != x"0" ]; then
failed=`expr $failed + $st`
fi
wellknown_object_test AB1D30F3768811D1ADED00C04FD8D5CD System
st=$?
if [ x"$st" != x"0" ]; then
failed=`expr $failed + $st`
fi
wellknown_object_test A361B2FFFFD211D1AA4B00C04FD7D83A "Domain Controllers"
st=$?
if [ x"$st" != x"0" ]; then
failed=`expr $failed + $st`
fi
wellknown_object_test AA312825768811D1ADED00C04FD8D5CD Computers
st=$?
if [ x"$st" != x"0" ]; then
failed=`expr $failed + $st`
fi
wellknown_object_test A9D1CA15768811D1ADED00C04FD8D5CD Users
st=$?
if [ x"$st" != x"0" ]; then
failed=`expr $failed + $st`
fi
# Extended-DN round trips: fetch the base DN in hex (:0) and string (:1)
# extended forms, then verify each special form resolves back to $BASEDN.
echo "Getting HEX GUID/SID of $BASEDN"
HEXDN=`$ldbsearch $CONFIGURATION $options -b "$BASEDN" -H $p://$SERVER -s base "(objectClass=*)" --controls=extended_dn:1:0 distinguishedName | grep 'distinguishedName: ' | cut -d ' ' -f2-`
HEXGUID=`echo "$HEXDN" | cut -d ';' -f1`
echo "HEXGUID[$HEXGUID]"
echo "Getting STR GUID/SID of $BASEDN"
STRDN=`$ldbsearch $CONFIGURATION $options -b "$BASEDN" -H $p://$SERVER -s base "(objectClass=*)" --controls=extended_dn:1:1 distinguishedName | grep 'distinguishedName: ' | cut -d ' ' -f2-`
echo "STRDN: $STRDN"
STRGUID=`echo "$STRDN" | cut -d ';' -f1`
echo "STRGUID[$STRGUID]"
echo "Getting STR GUID/SID of $BASEDN"
STRDN=`$ldbsearch $CONFIGURATION $options -b "$BASEDN" -H $p://$SERVER -s base "(objectClass=*)" --controls=extended_dn:1:1 | grep 'dn: ' | cut -d ' ' -f2-`
echo "STRDN: $STRDN"
STRSID=`echo "$STRDN" | cut -d ';' -f2`
echo "STRSID[$STRSID]"
SPECIALDNS="$HEXGUID $STRGUID $STRSID"
for SPDN in $SPECIALDNS; do
echo "Search for $SPDN"
nentries=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER -s base -b "$SPDN" '(objectClass=*)' | grep "dn: $BASEDN" | wc -l`
if [ $nentries -lt 1 ]; then
echo "Special search returned 0 items"
failed=`expr $failed + 1`
fi
done
# Searching by attribute OID must behave exactly like searching by name.
echo "Search using OIDs instead of names"
nentries1=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER '(objectClass=user)' name | grep "^name: " | wc -l`
nentries2=`$ldbsearch $options $CONFIGURATION -H $p://$SERVER '(2.5.4.0=1.2.840.113556.1.5.9)' name | grep "^name: " | wc -l`
if [ $nentries1 -lt 1 ]; then
echo "Error: Searching user via (objectClass=user): '$nentries1' < 1"
failed=`expr $failed + 1`
fi
if [ $nentries2 -lt 1 ]; then
echo "Error: Searching user via (2.5.4.0=1.2.840.113556.1.5.9) '$nentries2' < 1"
failed=`expr $failed + 1`
fi
if [ x"$nentries1" != x"$nentries2" ]; then
echo "Error: Searching user with OIDS[$nentries1] doesn't return the same as STRINGS[$nentries2]"
failed=`expr $failed + 1`
fi
# exit status = number of failed checks
exit $failed
|
amitay/samba
|
testprogs/blackbox/test_ldb.sh
|
Shell
|
gpl-3.0
| 8,014 |
#!/bin/sh
# Simulate a yellow-side goal by feeding the "YG" event
# into the foos debug input file.
printf 'YG\n' > /tmp/foos-debug.in
|
swehner/foos
|
debug/goal_yellow.sh
|
Shell
|
gpl-3.0
| 42 |
#!/bin/sh
# This script generates the po/nautilus-admin.pot file.
# Change into the directory containing this script so the relative
# paths below work no matter where the caller invoked us from.
cd "$(dirname "$(readlink -f "$0")")"
xgettext --package-name=nautilus-admin \
         --package-version=1.1.9 \
         --copyright-holder='Bruno Nova <[email protected]>' \
         --msgid-bugs-address='https://github.com/brunonova/nautilus-admin/issues' \
         -cTRANSLATORS \
         -s -o "po/nautilus-admin.pot" \
         "extension/nautilus-admin.py"
|
brunonova/nautilus-admin
|
generate-pot-file.sh
|
Shell
|
gpl-3.0
| 469 |
#!/bin/bash
# Copyright (C) <2014,2015> <Ding Wei>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Change log
# 170228: Ding Wei created it
#
# rev2patch.sh REV -- given an SVN revision of the local "ichange"
# checkout, reconstruct and print the matching
# "git fetch ssh://... && git cherry-pick FETCH_HEAD" command line.
export REV=$1
export ICHANGE_LOC=~/svn/ichange
export TOYEAR=$(date +%Y)
# bail out silently when no revision was given or the checkout is missing
[[ -z $REV || ! -e $ICHANGE_LOC ]] && exit
# file modified ("M") in that revision, excluding the all-change record
export ICHANGE_RECORD=$(svn log -r $REV -v $ICHANGE_LOC | egrep ' M ' | grep -v all-change | awk -F' ' {'print $2'})
# the line blamed to $REV inside that record; field layout is assumed from
# the ichange record format (fields 5 and 9 used below) — verify if it changes
export ICHANGE_ENTRY=$(svn blame ~/svn/ichange/$ICHANGE_RECORD | grep $REV | awk -F' ' {'print $3'})
# gerrit server name is the path component right after the year directory
export GERRIT_SRV=$(echo $ICHANGE_RECORD | awk -F"$TOYEAR/" {'print $2'} | awk -F'/' {'print $1'})
cd $ICHANGE_LOC
# optional "remote=" prefix taken from the newest manifest file
export REMOTE_ENTRY=$(grep 'remote=' $(find | grep manifest$ | sort -u | tail -n1)/* | tail -n1 | awk -F'="' {'print $2'} | awk -F'"' {'print $1'})
[[ ! -z $REMOTE_ENTRY ]] && export REMOTE_PATH="$REMOTE_ENTRY/" || export REMOTE_PATH=''
echo 'git fetch ssh://'"$GERRIT_SRV"/"$REMOTE_PATH$(echo $ICHANGE_ENTRY | awk -F'|' {'print $5" "$9'})"' && git cherry-pick FETCH_HEAD'
rm -f /tmp/$REV.tmp
|
daviding924/ibuild
|
ichange/rev2patch.sh
|
Shell
|
gpl-3.0
| 1,582 |
#!/bin/dash
# lyrics.sh SONG... -- fetch song lyrics.
# Uses Google's "I'm Feeling Lucky" redirect (btnI) restricted to
# azlyrics.com, follows redirects (-L), then strips HTML between the
# azlyrics "start/end of lyrics" markers with awk.
# NOTE(review): the final pipe into `p` refers to a command not defined
# here — presumably a pager/printer available on this system; confirm.
curl -e "http://www.google.com" -A "Mozilla/4.0" -skL "http://google.com/search?q=site:azlyrics.com+$(echo "$@" | tr ' ' '+')&btnI" | awk '
/<!-- start of lyrics -->/, /<!-- end of lyrics -->/ {
gsub("<[^>]*>", "")
gsub(/\r/, "")
print "  " $0
}
' | p
|
israellevin/kalisher
|
overlay/root/bin/lyrics.sh
|
Shell
|
gpl-3.0
| 296 |
#!/bin/bash
# Container entry point: prepare data directories, start mongodb/redis,
# inject random secrets into the settings file, then launch every
# ShareLaTeX service with per-service log files.
export PATH=$PATH:/opt/texbin
# create data paths
mkdir -p /data/db
mkdir -p /data/user_files
mkdir -p /data/compiles
mkdir -p /data/cache
mkdir -p /data/tmp
mkdir -p /data/tmp/uploads
mkdir -p /data/tmp/dumpFolder
mkdir -p /data/logs/
mongod &
redis-server &
# Waiting for mongodb to startup
until nc -z localhost 27017
do
  sleep 1
done
# Replace the two CRYPTO_RANDOM placeholders in the settings file with
# fresh random secrets.
# (Fix: the two `sed -i` invocations were backgrounded with '&', so they
#  raced on the same file — sed -i rewrites via temp-file + rename, and
#  one replacement could be lost. They now run sequentially.)
CRYPTO_RANDOM=$(dd if=/dev/urandom bs=1 count=32 2>/dev/null | base64 -w 0 | rev | cut -b 2- | rev | tr -d '\n+/')
sed -i "0,/CRYPTO_RANDOM/s/CRYPTO_RANDOM/$CRYPTO_RANDOM/" /etc/sharelatex/settings.coffee
CRYPTO_RANDOM=$(dd if=/dev/urandom bs=1 count=32 2>/dev/null | base64 -w 0 | rev | cut -b 2- | rev | tr -d '\n+/')
sed -i "0,/CRYPTO_RANDOM/s/CRYPTO_RANDOM/$CRYPTO_RANDOM/" /etc/sharelatex/settings.coffee
# start sharelatex with logging to files; the last service (web) runs in
# the foreground and keeps the container alive.
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/chat/app.js >> /data/logs/chat.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/clsi/app.js >> /data/logs/clsi.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/docstore/app.js >> /data/logs/docstore.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/document-updater/app.js >> /data/logs/document-updater.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/filestore/app.js >> /data/logs/filestore.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/real-time/app.js >> /data/logs/real-time.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/spelling/app.js >> /data/logs/spelling.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/tags/app.js >> /data/logs/tags.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/track-changes/app.js >> /data/logs/track-changes.log 2>&1 &
SHARELATEX_CONFIG=/etc/sharelatex/settings.coffee node /sharelatex/web/app.js >> /data/logs/web.log 2>&1
|
DirkHeinke/sharelatex-docker
|
sharelatex.sh
|
Shell
|
gpl-3.0
| 2,023 |
:
# src_communityband/reset_membership_status.sh
# --------------------------------------------
# CGI process: resets the status of all active band members to
# 'Not Paid', emitting an HTML report. Uses the appaserver helper
# tools (sql.e, html_table.e, basename.e, ...).
# Input
# -----
echo "$0" "$*" 1>&2
# resolve the application/database name from the environment
if [ "$APPASERVER_DATABASE" != "" ]
then
	application=$APPASERVER_DATABASE
elif [ "$DATABASE" != "" ]
then
	application=$DATABASE
fi
if [ "$application" = "" ]
then
	echo "Error in `basename.e $0 n`: you must first:" 1>&2
	echo "\$ . set_database" 1>&2
	exit 1
fi
# args: $1 is ignored, $2 = process name, $3 = y to execute (else preview)
if [ "$#" -ne 3 ]
then
	echo "Usage: $0 ignored process_name execute_yn" 1>&2
	exit 1
fi
process_name=$2
execute_yn=$3
# Constants
# ---------
# Variables
# ---------
process_title=`echo "$process_name" | format_initial_capital.e`
# Process
# -------
# members considered "active" (and therefore to be reset)
where="status is null or status = 'Active'"
statement="select count(*)
	   from band_member
	   where ${where};"
content_type_cgi.sh
echo "<html><head><link rel=stylesheet type=text/css href=/appaserver/$application/style.css></head>"
echo "<body><h1>$process_title</h1>"
echo "$statement" | sql.e | html_table.e '' 'Count' ''
if [ "$execute_yn" = 'y' ]
then
	statement="	update band_member
			set status = 'Not Paid'
			where ${where};"
	echo "$statement" | sql.e
	echo "Process complete." | html_paragraph_wrapper
else
	echo "Process not executed." | html_paragraph_wrapper
fi
echo "</body></html>"
# bump the process execution counter for auditing
process_table_name=`get_table_name $application process`
echo "	update $process_table_name
	set execution_count =
		if(execution_count,execution_count+1,1)
	where process = '$process_name';"	|
sql.e
exit 0
|
timhriley/appaserver
|
src_communityband/reset_membership_status.sh
|
Shell
|
gpl-3.0
| 1,466 |
#!/bin/bash
# Piramide.sh N -- print a centred numeric pyramid of N rows.
# Row x reads 1..x followed by (x-1)..1, e.g. row 3 is "12321".
# The drawing code is extracted into draw_pyramid so it can be reused
# and tested; the command-line behaviour is unchanged.

# draw_pyramid HEIGHT -- emit the pyramid (with the original blank
# padding lines before and after) to stdout.
draw_pyramid() {
    local height=$1
    local row col
    echo " "
    echo " "
    for (( row = 1; row <= height; row++ )); do
        # leading spaces to centre the row
        for (( col = height; col >= row; col-- )); do
            echo -n " "
        done
        # ascending half: 1..row
        for (( col = 1; col <= row; col++ )); do
            echo -n "$col"
        done
        # descending half: (row-1)..1
        for (( col = 2; col <= row; col++ )); do
            echo -n "$(( row - col + 1 ))"
        done
        echo -n " "
        echo " "
    done
    echo " "
    echo " "
}

if [ $# -ne 0 ]
then
    draw_pyramid "$1"
else
    echo "Mete un parametro"
    # ./Piramide.sh 5
fi
|
procamora/Scripts-Bash
|
Piramide.sh
|
Shell
|
gpl-3.0
| 424 |
#!/bin/bash
# Round-trip smoke test for qvz: rebuild, compress fref.txt, decompress,
# and diff the result against the original (diff prints nothing on success).
set -v
make clean
make
# -u: side-information file, -c 1: cluster count, -f 0.5: rate parameter
bin/qvz -u fref.txt -c 1 -f 0.5 -s test.in test.q > write
bin/qvz -x test.q test.dec > read
diff fref.txt test.dec
|
mikelhernaez/qvz
|
test.sh
|
Shell
|
gpl-3.0
| 153 |
# Launch the XDB migration client with the bundled jars on the classpath.
# All command-line arguments are forwarded to org.xdb.MigrationClient.
# (Fix: forward "$@" instead of unquoted $1 $2 $3 — arguments containing
#  whitespace are no longer split, and any number of arguments is passed
#  through unchanged.)
java -cp ./bin/:./lib/xdb.jar:./lib/mysql-connector-java-5.1.38-bin.jar org.xdb.MigrationClient "$@"
|
BrownBigData/xdb_elasticity
|
migration_client.sh
|
Shell
|
gpl-3.0
| 104 |
#!/bin/bash
# This bash script will be set as ENTRYPOINT, all additional arguments to the proactive node
# can be set in the COMMAND/CMD which will be propagated to the bash script as argument.
# Those arguments will be added to the ProActive node exeuction via $@
echo "Start docker daemon"
# Start docker deamon: from jpetazzo/dind
/usr/local/bin/wrapdocker
# Run the ProActive node in the foreground so it keeps the container alive;
# CMD arguments are forwarded verbatim.
echo "Start ProActive node: /data/scheduling/bin/proactive-node $@"
/data/scheduling/bin/proactive-node "$@"
|
tobwiens/proactive-node-dockerfile
|
6.1.0/container-start-script.sh
|
Shell
|
gpl-3.0
| 473 |
# GRUB2 (grub-fm) boot menu for the AUSTRUMI live distro.
# Patches the initrd's /init in-memory (via write.lua) so the squashfs is
# attached through /dev/loop1 instead of being searched for on a device.
set icon="slackware"
set kcmdline="rcutree.rcu_idle_gp_delay=1 lang_en dousb aa=/dev/loop1";
set kernel_img="(loop)/austrumi/bzImage"
set initrd_img="(loop)/austrumi/initrd.gz";
echo $"Loading ...";
# mount the initrd as (md_initrd) so its files can be edited in place
loopback -d md_initrd
loopback -m md_initrd ${initrd_img};
# three in-place edits of (md_initrd)/init; '%' escapes '-' for write.lua
set search_str="if test %-f \$CD/austrumi/austrumi.fs; then";
set replace_str="if [ %-f \$CD\$sr ]; then losetup \$aa \$CD\$sr";
set src_file="(md_initrd)/init";
lua ${prefix}/write.lua;
set search_str="echo \"  Austrumi found at \$device\"";
set replace_str="CD=\$bb; device=loop1; mount \$aa \$bb  ";
set src_file="(md_initrd)/init";
lua ${prefix}/write.lua;
set search_str="# fdisk %-l | grep ^/dev | cut %-c6%-9";
set replace_str="bb=/mnt/loop1; mkdir \$bb; echo \$bb  ";
set src_file="(md_initrd)/init";
lua ${prefix}/write.lua;
# boot variants differ only in the extra kernel command-line flags
menuentry $"1. Run from RAM / Eject USB" --class $icon{
    linux ${kernel_img} $kcmdline $linux_extra emb_user;
    initrd (md_initrd);
}
menuentry $"2. Do not eject USB" --class $icon{
    linux ${kernel_img} $kcmdline $linux_extra emb_user nocache;
    initrd (md_initrd);
}
menuentry $"3. Superuser" --class $icon{
    linux ${kernel_img} $kcmdline $linux_extra;
    initrd (md_initrd);
}
menuentry $"4. Text mode" --class $icon{
    linux ${kernel_img} $kcmdline $linux_extra emb_user text;
    initrd (md_initrd);
}
|
a1ive/grub2-filemanager
|
boot/grubfm/distro/austrumi.sh
|
Shell
|
gpl-3.0
| 1,321 |
# Build an ODD schema by example from the TEI XML corpus in XML/.
# The commented lines show the earlier docx -> TEI conversion step.
#for f in Transcriptions/*/*.docx; do echo $f; docxtotei --profile=oulipo $f; done
#mv Transcriptions/*/*.xml XML
cd XML
# -it:main invokes the stylesheet's named "main" template (no source doc)
saxon -it:main -o:generated.odd ../oddbyexample.xsl corpus=.
|
lb42/difdepo
|
2014-10-Seminaire/convert.sh
|
Shell
|
gpl-3.0
| 184 |
#!/bin/bash
#author		:Ricardo de Souza Maia
#license	:GPLv3
#version	:$Id
#encoding	:UTF-8
#Based on GNU/Linux CentOS /etc/init.d/httpd script
# httpd -f /etc/httpd-seplan/conf/httpd.conf -d /etc/httpd-seplan/
# start|restart|graceful|stop|graceful-stop
#httpd -f /etc/httpd-seplan/conf/httpd.conf -d /etc/httpd-seplan/ -t
#
# Fixes over the original:
#  * '$prog=' / '$pidfile=' were invalid assignments (leading '$' makes
#    the shell expand-then-execute); now proper 'var=' assignments.
#  * the existence test for /usr/sbin/httpd was inverted ('-f' printed
#    the "not found" message when the daemon WAS present).
#  * $apachectl was never defined (configtest executed its own argument
#    as a command); it is now set explicitly.
#  * RETVAL is initialised and set on every branch so 'exit $RETVAL'
#    always has a value.

prog="httpd-$USERNAME"
pidfile="/etc/httpd-$USERNAME/run/httpd.pid"
apachectl=/usr/sbin/apachectl
RETVAL=0

if [ ! -f /usr/sbin/httpd ]; then
	echo "Daemon httpd não encontrado."
fi

case "$1" in
  start)
	echo -n $"Starting $prog: "
	httpd -f /etc/httpd-$USERNAME/conf/httpd.conf -d /etc/httpd-$USERNAME/
	RETVAL=$?
	;;
  stop)
	echo -n $"Stopping $prog: "
	kill -9 $(<"$pidfile")
	rm "$pidfile"
	RETVAL=$?
	;;
  status)
	# NOTE(review): 'status' is the RHEL initscripts helper; the original
	# never sourced /etc/rc.d/init.d/functions — confirm it is available.
	status -p "$pidfile" httpd
	RETVAL=$?
	;;
  restart)
	httpd -f /etc/httpd-$USERNAME/conf/httpd.conf -d /etc/httpd-$USERNAME/ -k restart
	RETVAL=$?
	;;
  configtest)
	$apachectl "$@"
	RETVAL=$?
	;;
  *)
	echo $"Usage: $prog {start|restart|graceful|stop|graceful-stop|configtest|status}"
	RETVAL=2
esac

exit $RETVAL
|
google-code/apache-isolated-instances
|
httpd.sh
|
Shell
|
gpl-3.0
| 1,204 |
# Start the Diamond DMS server in the background, detached from the shell.
echo "Starting Diamond DMS ..."
# Launch the embedded-Tomcat based server with its full dependency
# classpath; nohup + & keeps it alive after logout (output -> nohup.out).
nohup java -classpath ./lib/activation-1.1.jar:./lib/commons-fileupload-1.2.2.jar:./lib/commons-io-2.0.1.jar:./lib/commons-lang3-3.1.jar:./lib/commons-logging.jar:./lib/commons-modeler.jar:./lib/commons-validator-1.4.0.jar:./lib/derby.jar:./lib/dmsmodel.jar:./lib/dmsserver.jar:./lib/dmsweb.jar:./lib/ecj-4.4.jar:./lib/filters-2.0.235.jar:./lib/itextpdf-5.2.0.jar:./lib/itext-xtra-5.2.0.jar:./lib/jcommon-1.0.16.jar:./lib/jdom-2.0.3.jar:./lib/mail-1.4.jar:./lib/tomcat-dbcp.jar:./lib/tomcat-embed-core.jar:./lib/tomcat-embed-el.jar:./lib/tomcat-embed-jasper.jar:./lib/tomcat-embed-logging-juli.jar:./lib/tomcat-embed-logging-log4j.jar:./lib/tomcat-embed-websocket.jar kreidos.diamond.KRYSTALServer &
echo "Diamond DMS Started ..."
|
Kreidos/diamond
|
script/diamonddms.sh
|
Shell
|
gpl-3.0
| 768 |
#!/bin/bash
# easy_install3 simple-crypt
# Install the Python 3 libraries the network monitor depends on.
# NOTE(review): aptitude will prompt for confirmation without -y — confirm
# this script is meant to run interactively.
aptitude install python3-yaml python3-nmap python3-dialog python3-netifaces python3-netaddr python3-psutil
|
berserkerbernhard/Lidskjalv
|
code/networkmonitor/install_dependencies.sh
|
Shell
|
gpl-3.0
| 150 |
#! /bin/bash
# FoxCatcher - a bulk firefox downloader
# Downloads multiple versions of firefox with different locales
# for testing translations and accept language headers
# dillbyrne GNU GPL v3
# catch_foxes VERSION PLATFORM LOCALES
#   Download, unpack and symlink one Firefox build per locale.
#   $1 - firefox version (e.g. 37.0.1)
#   $2 - platform string (e.g. linux-x86_64)
#   $3 - comma separated locale list (e.g. ru,be,fr-FR)
catch_foxes()
{
# Split the comma separated locale list into an array.
declare -a LANGS=( $(echo $3 | sed -e 's/,/ /g') )
for L in "${LANGS[@]}"
do
# Check the version has been downloaded already
# Skip it if so
if [ ! -e "foxes/.browsers/firefox-$1-$L" ]
then
# Download the specified browser
echo "Downloading firefox $1-$L"
curl -s https://download-installer.cdn.mozilla.net/pub/firefox/releases/$1/$2/$L/firefox-$1.tar.bz2 > foxes/.browsers/$1-$L.tar.bz2
# Extract to subfolder and delete tar file
echo "Extracting tar file to ./browsers/firefox-$1-$L"
tar -xf foxes/.browsers/$1-$L.tar.bz2 --transform "s/firefox/firefox-$1-$L/"
mv firefox-$1-$L foxes/.browsers/
rm foxes/.browsers/$1-$L.tar.bz2
# Make symlinks. Lead filenames with the locale code for better tab completion
echo "Making symbolic link"
ln -s .browsers/firefox-$1-$L/firefox ./foxes/$L-$1
echo
else
echo "Skipping firefox-$1-$L"
echo
fi
done
echo "Done"
}
setup_dirs()
{
    # Make sure the hidden browser download cache exists (mkdir -p also
    # creates the parent "foxes" directory on first run).
    [ -d foxes/.browsers ] || mkdir -p foxes/.browsers
}
show_locales()
{
    # Print the locale codes Mozilla publishes Firefox builds for.
    # A quoted heredoc replaces the long run of echo statements; the
    # emitted text is identical.
    cat <<'EOF'

-----------------------------------------------------------
 FoxCatcher - Available Locales
-----------------------------------------------------------

ach af an ar as ast az
be bg bn-BD bn-IN br bs
ca cs cy da de dsb
el en-GB en-US en-ZA eo es-AR es-CL es-ES es-MX et eu
fa ff fi fr fy-NL
ga-IE gd gl gu-IN he hi-IN hr hsb hu hy-AM
id is it ja kk km kn ko lij lt lv
mai mk ml mr ms nb-NO nl nn-NO
or pa-IN pl pt-BR pt-PT rm ro ru
si sk sl son sq sr sv-SE ta te th tr
uk uz vi xh zh-CN zh-TW

-----------------------------------------------------------

EOF
}
usage()
{
    # Print the command-line help text. A quoted heredoc reproduces the
    # original echo output exactly (including the original wording of the
    # -p line), with no shell expansion.
    cat <<'EOF'

-----------------------------------------------------------
 FoxCatcher
-----------------------------------------------------------


Usage: foxcatcher -l locales -v version -p platform

-l locales : a comma separated list of locales

-v version : the version of firefox to download

-p platform : the platfrom to download download for

-c : show available locale codes

e.g: ./foxcatcher -l ru,be,fr-FR -v 37.0.1 -p linux-x86_64


-----------------------------------------------------------

EOF
}
check_args()
{
    # All three values (version, platform, locales) are mandatory; show the
    # help text and abort if any of them is missing or empty.
    if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]
    then
        usage
        exit 1
    fi
}
# Parse the command line flags, validate them and kick off the downloads.
main()
{
local OPTIND=1 #Option Index
local version=""
local locales=""
local platform=""
# -l/-v/-p take values; -c lists locales; -h or unknown flags show usage.
while getopts "h?:l:v:p:c" opt;
do
case "$opt" in
h|\?)
usage
exit 0
;;
l) locales=$OPTARG
;;
v) version=$OPTARG
;;
p) platform=$OPTARG
;;
c) show_locales
exit 0
;;
esac
done
shift $((OPTIND-1))
# NOTE(review): arguments are passed unquoted, so an empty $version shifts
# the later values into earlier positions — confirm this is acceptable.
check_args $version $platform $locales
setup_dirs
catch_foxes $version $platform $locales
}
main $*
|
dillbyrne/addon-dev-tools
|
foxcatcher.sh
|
Shell
|
gpl-3.0
| 3,164 |
#!/bin/bash
###########################################################################
# Copyright (c) 2011-2014 Unixmedia S.r.l. <[email protected]>
# Copyright (c) 2011-2014 Franco (nextime) Lanza <[email protected]>
#
# Domotika System Controller Daemon "domotikad"  [http://trac.unixmedia.it]
#
# This file is part of domotikad.
#
# domotikad is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# Remove build and runtime leftovers from the source tree.
# `find ... -exec rm` replaces the original `rm $(find ...)`, which broke
# on file names containing whitespace and could exceed ARG_MAX.
find . -name '*.pyc' -exec rm -f {} +
find . -name '*~' -exec rm -f {} +
rm -f log/*.log
rm -f run/*.pid
|
nexlab/domotikad
|
clean.sh
|
Shell
|
gpl-3.0
| 1,138 |
#!/usr/bin/env zsh
# Copyright (C) 2016 Nico Bäurer
# #
# # This program is free software: you can redistribute it and/or modify
# # it under the terms of the GNU General Public License as published by
# # the Free Software Foundation, either version 3 of the License, or
# # (at your option) any later version.
# #
# # This program is distributed in the hope that it will be useful,
# # but WITHOUT ANY WARRANTY; without even the implied warranty of
# # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# # GNU General Public License for more details.
# #
# # You should have received a copy of the GNU General Public License
# # along with this program. If not, see <http://www.gnu.org/licenses/>.
# Shared colour palette and the progressbar() helper used below.
source $HOME/i3/color.sh
source $HOME/i3/lemonbar-progress.sh
# start
# ./lemonbar-test.sh | lemonbar -b -B "#00000000" -F "#fff" -g 1600x20+0+0 -f "FontAwesome:size=10" -f "Monospace:letterSpace=-5:size=7" -f "Ubuntu Mono derivative Powerline:size=10" | bash
# FontAwesome glyph codepoints for the transport buttons; the commented
# lines keep alternative glyphs for each one.
ICO_PREV="\uf137"
#"\uF049"
ICO_NEXT="\uf138"
#"\uF050"
ICO_PLAY="\uf144"
#"\uF04B"
ICO_STOP="\uf28d"
#"\uF04D"
ICO_PAUSE="\uf28b"
#"\uF04C"
# Lemonbar foreground-colour switch sequences built from the palette.
DEFAULT_COLOR="%{F#$COLOR_TEXT_HEX_RAW}"
DEFAULT_ICO_COLOR_RAW="${COLOR_ACCENT_HEX_RAW}"
DEFAULT_ICO_COLOR="%{F#$DEFAULT_ICO_COLOR_RAW}"
#"fb4757"
# Connection options prepended to every mpc invocation (zsh array).
MPC_PARAMS=( -h localhost -p 6600 )
# Emit (as text, no trailing newline) an mpc command line for the given
# subcommand; used inside lemonbar %{A:...:} click handlers.
# NOTE(review): under zsh ${MPC_PARAMS} flattens the whole array; the same
# expansion under bash would yield only the first element — zsh only.
mpccmd() {
echo -n "mpc ${MPC_PARAMS} $@"
}
# Current track label: "artist - file" with the path stripped, characters
# reduced to a safe set, whitespace squeezed, truncated to 50 chars and
# rendered in lemonbar font slot 3 (%{T3}...%{T-}).
song() {
text=$(mpc current $MPC_PARAMS -f '[[%artist% - ]%file%]' | grep -Pzo '[^\/]+\.[^\/]+' | grep -Po $'[äöüÄÖÜßa-zA-Z0-9\\-\\_ \\.\'\"]+' | tr '\n' ' ' | sed 's/ \+/ /g' | xargs -0)
echo -n "%{T3}${text:0:50}%{T-}"
}
padding() {
    # Print a single space left-padded to width $1 — i.e. $1 spaces for any
    # positive width (and still one space for width 0), no trailing newline.
    printf '%-*s' "$1" ' '
}
# icon GLYPH COMMAND — print GLYPH in the accent colour, padded on both
# sides, wrapped in a lemonbar clickable area (%{A:COMMAND:}) and restore
# the default text colour afterwards.
icon() {
echo -n $DEFAULT_ICO_COLOR
echo -n "%{A:$2:}"
padding 3
echo -n "$1"
padding 3
echo -n "%{A}"
echo -n $DEFAULT_COLOR
}
# Transport buttons: each renders its glyph wired to the matching mpc
# action via a lemonbar click handler.
controlprev() {
icon $ICO_PREV "$(mpccmd prev)"
}
controlnext() {
icon $ICO_NEXT "$(mpccmd next)"
}
controlstop() {
icon $ICO_STOP "$(mpccmd stop)"
}
controlplay() {
icon $ICO_PLAY "$(mpccmd play)"
}
controlpause() {
icon $ICO_PAUSE "$(mpccmd pause)"
}
# Render the whole control strip, choosing which buttons to show based on
# the player state captured in $mpcstat by the main loop below.
controls() {
controlprev
if [[ $(echo $mpcstat | grep -i playing) ]] ; then
controlstop
controlpause
elif [[ $(echo $mpcstat | grep -i paused) ]] ; then
controlstop
controlplay
else
controlplay
fi
controlnext
}
# echo "\u2013\u2013\u2013\u2758\u2013"
# Main loop: poll mpd every 300ms and emit one lemonbar line per iteration
# (left = track label, centre = progress bar, right = transport controls).
progress=0
max=100
mpcstat=""
while true; do
if [[ $progress -gt $max ]] ; then
progress=0
sleep 5
fi
# echo -n $(progressbair 10 20 "ffffff")
mpcstat=$(mpc $MPC_PARAMS status)
# Pull the "(NN%)" completion figure; only update while actually playing.
tmp_progress=$(echo -n $mpcstat | grep -i playing | sed 's/.*(//' | sed 's/%.*//')
[[ ! -z $tmp_progress ]] && progress=$tmp_progress
#    color="46D14C"
#    [[ $progress -gt 30 ]] && color="ffffff"
#    [[ $progress -gt 70 ]] && color="" #color="${COLOR_ACCENT_HEX_RAW}"
echo -n "%{l}$(padding 4)$(song)"
echo -n "%{c}"
echo -n "$(progressbar $progress $max)"
echo "%{r}$(controls)$(padding 1)"
# echo "%{r}"$(progressbar 15 20 "46D14C")
sleep .3
done
|
seebye/dotfiles
|
i3/lemonbar-music.sh
|
Shell
|
gpl-3.0
| 2,938 |
#!/bin/bash
###################################
# $Id: backup 379 2012-04-02 08:43:42Z netkiller $
# Author: [email protected]
# Home: http://www.netkiller.cn
###################################
# mongodump wrapper: dump each database in BACKUP_DBNAME into a
# timestamped directory and prune backups older than $COPIES days.

#Number of copies
COPIES=30
###################################
BACKUP_HOST="localhost"
BACKUP_USER="admin"
BACKUP_PASS=""
BACKUP_DBNAME="dbname"
BACKUP_DIR=/opt/backup
####################################
DUMP="/usr/bin/mongodump"
LOGFILE=/var/tmp/backup.mongodb.log
#TIMEPOINT=$(date -u +%Y-%m-%d)
TIMEPOINT=$(date -u +%Y-%m-%d.%H:%M:%S)
# NOTE(review): passing the password on the command line leaks it via `ps`;
# prefer a credentials file if the mongodump version supports one.
DUMP_OPTS="-h $BACKUP_HOST -u$BACKUP_USER -p$BACKUP_PASS"
####################################
# Backups must not be world-readable.
umask 0077

test ! -d "$BACKUP_DIR" && mkdir -p "$BACKUP_DIR"
# FIX: exit non-zero on failure — the original exited 0, which hid the
# error from cron / calling scripts.
test ! -w "$BACKUP_DIR" && echo "Error: $BACKUP_DIR is un-writeable." && exit 1

for dbname in $BACKUP_DBNAME
do
	# NOTE(review): this per-database directory is created but never used —
	# the dump below writes under $BACKUP_DIR/$TIMEPOINT; confirm layout.
	test ! -d "$BACKUP_DIR/$dbname" && mkdir -p "$BACKUP_DIR/$dbname"
	$DUMP $DUMP_OPTS -d "$dbname" -o "$BACKUP_DIR/$TIMEPOINT" >> "$LOGFILE"
done

# Drop backup files older than $COPIES days.
find "$BACKUP_DIR" -type f -mtime +"$COPIES" -delete
|
oscm/shell
|
backup/backup.mongodb.sh
|
Shell
|
gpl-3.0
| 992 |
#!/bin/bash
# Poll the VK API once a minute and broadcast (via wall) whenever the
# watched user's online status changes.
last_status=0
req_string="https://api.vk.com/method/users.get?user_ids=347745&fields=online&v=5.8"
while (( 1 ))
do
	sleep 60
	api_ans=$(wget "$req_string" -q -O - | grep -oP "\"online\":\d")
	# FIX: skip the round when the request or parse produced nothing —
	# previously an empty answer was compared anyway and could be
	# misreported as a status change.
	if [[ -z $api_ans ]]; then
		continue
	fi
	# The status digit is the last character of the matched "online":N.
	cur_status=${api_ans: -1}
	if [[ $cur_status != $last_status ]]
	then
		if [[ $cur_status == 0 ]]
		then
			cat <<< "AM goes offline" | wall
		else
			cat <<< "AM goes online" | wall
		fi
		last_status=$cur_status
	fi
done
|
ItsLastDay/academic_university_2016-2018
|
subjects/Bash&Python/bash_hw2/5.sh
|
Shell
|
gpl-3.0
| 496 |
#!/bin/sh
# Launch the game binary from its own directory.
# FIX: abort when the cd fails — otherwise the binary would be exec'd
# against the wrong working directory.
cd "$(dirname "$0")" || exit 1
exec ./1812_aventura_linux_x86.x86 "$@"
|
Firenz/1812
|
executables/linux/x86/run_1812.sh
|
Shell
|
gpl-3.0
| 74 |
#!/bin/sh
# Ensure that moving hard-linked arguments onto existing destinations works.
# Likewise when using cp --preserve=link.
# Copyright (C) 2003-2018 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ cp mv
skip_if_root_
# Fixture: dst/ holds three independent files a b c, while a b c in the
# cwd are three hard links to a single inode.
mkdir dst || framework_failure_
(cd dst && touch a b c) || framework_failure_
touch a || framework_failure_
ln a b || framework_failure_
ln a c || framework_failure_
# ======================================
cp --preserve=link a b c dst || fail=1
# The source files must remain.
test -f a || fail=1
test -f b || fail=1
test -f c || fail=1
cd dst
# Three destination files must exist.
test -f a || fail=1
test -f b || fail=1
test -f c || fail=1
# The three i-node numbers must be the same.
# (`ls -i` prints "<inode> <name>"; sed strips the name.)
ia=$(ls -i a|sed 's/ a//')
ib=$(ls -i b|sed 's/ b//')
ic=$(ls -i c|sed 's/ c//')
test $ia = $ib || fail=1
test $ia = $ic || fail=1
cd ..
# Reset the destination files for the mv half of the test.
rm -f dst/[abc]
(cd dst && touch a b c)
# ======================================
mv a b c dst || fail=1
# The source files must be gone.
test -f a && fail=1
test -f b && fail=1
test -f c && fail=1
cd dst
# Three destination files must exist.
test -f a || fail=1
test -f b || fail=1
test -f c || fail=1
# The three i-node numbers must be the same.
ia=$(ls -i a|sed 's/ a//')
ib=$(ls -i b|sed 's/ b//')
ic=$(ls -i c|sed 's/ c//')
test $ia = $ib || fail=1
test $ia = $ic || fail=1
Exit $fail
|
pexip/os-coreutils
|
tests/mv/hard-2.sh
|
Shell
|
gpl-3.0
| 2,030 |
#!/usr/bin/env bash
# usage: travis.sh before|after
# CI helper: "before" installs dependencies, "after" uploads coverage.
# FIX: quote "$1" and default it — the original unquoted `[ $1 == 'before' ]`
# is a syntax error when the script is run without an argument.
if [ "${1:-}" == 'before' ]; then
	composer self-update
	# install php-coveralls to send coverage info
	composer init --require=satooshi/php-coveralls:0.7.0 -n
	composer install --no-interaction --ignore-platform-reqs
elif [ "${1:-}" == 'after' ]; then
	# Coverage upload is skipped on PHP 7.0 and HHVM builds.
	if [ "$TRAVIS_PHP_VERSION" != "7.0" ] && [ "$TRAVIS_PHP_VERSION" != "hhvm" ]; then
		wget https://scrutinizer-ci.com/ocular.phar
		php ocular.phar code-coverage:upload --format=php-clover ./tmp/clover.xml
	fi
fi
|
geminilabs/pollux
|
tests/bin/travis.sh
|
Shell
|
gpl-3.0
| 516 |
#!/bin/bash
##
## @brief @(#) Test domain 3
##
## @file dom3.sh
##
## -----------------------------------------------------------------------------
## Enduro/X Middleware Platform for Distributed Transaction Processing
## Copyright (C) 2009-2016, ATR Baltic, Ltd. All Rights Reserved.
## Copyright (C) 2017-2019, Mavimax, Ltd. All Rights Reserved.
## This software is released under one of the following licenses:
## AGPL (with Java and Go exceptions) or Mavimax's license for commercial use.
## See LICENSE file for full text.
## -----------------------------------------------------------------------------
## AGPL license:
##
## This program is free software; you can redistribute it and/or modify it under
## the terms of the GNU Affero General Public License, version 3 as published
## by the Free Software Foundation;
##
## This program is distributed in the hope that it will be useful, but WITHOUT ANY
## WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
## PARTICULAR PURPOSE. See the GNU Affero General Public License, version 3
## for more details.
##
## You should have received a copy of the GNU Affero General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## -----------------------------------------------------------------------------
## A commercial use license is available from Mavimax, Ltd
## [email protected]
## -----------------------------------------------------------------------------
##
# Per-domain environment for Enduro/X test domain 3.
# System V IPC key — must be unique per domain on the host.
export NDRX_IPCKEY=56000
# NOTE(review): presumably this domain's random key — confirm against the
# sibling domN.sh scripts.
export NDRX_RNDK="IBvyyg8"
# Prefix namespacing this domain's message queues.
export NDRX_QPREFIX=/dom3
# Cluster node id of this domain.
export NDRX_NODEID=3
# Pid file for this domain's ndrxd daemon.
export NDRX_DPID=/tmp/ndrxd-dom3.pid
# vim: set ts=4 sw=4 et smartindent:
|
endurox-dev/endurox
|
atmitest/dom3.sh
|
Shell
|
agpl-3.0
| 1,733 |
#!/bin/sh
#
# Copyright (c) 2014, Scott J Maddox
#
# This file is part of SimpleQW.
#
# SimpleQW is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimpleQW is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with SimpleQW. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
# Build SimpleQW in a clean virtualenv and run its test suite.

# FIX: fail fast — without this, a failed install still reported success.
set -e

# Clean out existing build and venv files
rm -rf build
rm -rf dist
rm -rf venv

# Activate a clean virtualenv
virtualenv venv --no-site-packages
# FIX: `source` is a bashism; the shebang is /bin/sh, so use the POSIX `.`.
. venv/bin/activate

# Install cython
pip install cython nose

# Test setup.py install
python setup.py install
python setup.py test
python -m unittest simpleqw.tests.test_finite_well_1d

# Deactivate the virtual python environment
deactivate
|
scott-maddox/simpleqw
|
test_setup.sh
|
Shell
|
agpl-3.0
| 1,224 |
#!/bin/sh
# Stamp logs of Symfony1.x projects with the date of the last log line
# (of each file in log/history/). Support gzip compression.
# Tested with sf1.4 (the PHP framework) under Linux with bash & zless installed.
#
# Created on 2012-11-27 by Julien Moreau aka PixEye
# For every (possibly gzip-compressed) Symfony history log, stamp the
# file's mtime with the date found on its last log line.
# FIX: $(...) instead of backticks, and quoted expansions — the original
# `echo $lastl` word-split and glob-expanded the log line.
for f in log/history/*.log*
do
  echo
  ls -Flh --time-style=long-iso "$f"
  lastl=$(zless "$f" | tail -n1) # last log line
  printf '%s\n' "$lastl"
  # The first three space-separated fields form the timestamp.
  d=$(printf '%s' "$lastl" | cut -d' ' -f -3)
  touch -d "$d" "$f" 2> /dev/null || sudo touch -d "$d" "$f"
  ls -Flh --time-style=long-iso "$f"
done
|
PixEye/PixShellScripts
|
sf-date-logs.sh
|
Shell
|
agpl-3.0
| 546 |
#!/bin/bash
#######################################################################
#
# MNXB01-2017-HW3b
# File: pokemoninfo.sh.skeleton file
# Author: Florido Paganelli [email protected]
# Lund University
#
########################################################################
# The script must take in input as an argument the directory where the
# database is stored.
# for example:
# ./pokemoninfo.sh dataset/
# note that the name 'dataset' should not be hardcoded. It can be any directory
# name. Make sure to read slides 33,34,39
# Store the folder name in a variable called DBDIR.
# First CLI argument: the directory containing the csv dataset.
DBDIR=$1
# use this function to show an error message with usage information.
errormsg() {
	# Print invocation help; $0 expands to the script path in the heredoc.
	cat <<EOF
Usage:
$0 <directory>
directory must be a path containing a csv dataset.
EOF
}
### Exercise 1: 1 points
# Error out when no argument was given or it is empty; per the instruction
# above, errormsg() now prints the usage information on every error path.
if [[ $# -eq 0 ]]; then
	echo "Not enough arguments. Must be at least 1!";
	errormsg
	exit 1;
fi
if [ -z "$1" ]; then
	echo "Empty argument!"
	errormsg
	exit 1;
fi

### Exercise 2: 1 points
# Error out when DBDIR does not exist or is not a directory.
if [ ! -e "$DBDIR" ]; then
	echo "Argument does not exist!";
	errormsg
	exit 1;
fi
if [ ! -d "$DBDIR" ]; then
	echo "Argument is not a directory!";
	errormsg
	exit 1;
fi

### Exercise 3: 1 point
# Find which file contains "Pokémon Red Version".
# FIX: "$DBDIR"/* (instead of $DBDIR*) works whether or not the caller
# added a trailing slash, and quoting survives paths with spaces.
echo -e "\nSearching for Pokémon Red..."
grep "Pokémon Red Version" "$DBDIR"/*

### Exercise 4: 1 point
# delete existing allplatform.csv files in preparation of the next exercise
echo -e "\nRemoving old allplatforms.csv"
rm -f allplatforms.csv allplatforms.ordered.csv

### Exercise 5: 3 points
# Concatenate every data file, with its header line stripped by tail,
# into a single allplatforms.csv.
echo -e "\nCreating new allplatforms.csv"
for f in "$DBDIR"/*; do
	tail -n +2 "$f" >> allplatforms.csv
done

### Exercise 4: 1 point
# Sort the combined file on the second comma-separated field.
echo -e "\nSorting allplatforms.csv..."
sort -t, -k2 allplatforms.csv > allplatforms.ordered.csv

# Exercise 5: 4 points
# Count the games in each file (header excluded) and report one line per
# file, matching the documented format:
#   poke.Android.csv has 2 game(s)
# FIX: the original unquoted echo produced doubled spaces around "has".
echo -e "\nCalculating number of games for each file..."
for f in "$DBDIR"/*; do
	GAMES=$(tail -n +2 "$f" | wc -l)
	NAME=$(basename "$f")
	echo "$NAME has $GAMES game(s)"
done

exit 0;
|
floridop/MNXB01-2017
|
alehuusko/HW3b/pokemoninfo.sh
|
Shell
|
agpl-3.0
| 4,042 |
#!/bin/bash
# Overall suite status; flipped to 1 by t() on any mismatch.
exit_status=0
# t EXPECTED ARGS...
#   Run ./planeteer with ARGS and diff its stdout against EXPECTED;
#   a mismatch prints the unified diff and fails the suite.
function t() {
expected=$1
shift
diff -u --label Expected <( echo -n "$expected" ) \
--label Actual <( ./planeteer "$@" ) || exit_status=1
}
t ' 850,000 Buy 300 Medical Units
850,000 Jump from Earth to Sickonia (2 hyper jump units)
3,220,600 Sell 300 Medical Units
2,770,600 Buy 300 Heating Units
2,770,600 Jump from Sickonia to Hothor (2 hyper jump units)
5,200,600 Sell 300 Heating Units
4,999,600 Buy 300 Medical Units
4,999,600 Jump from Hothor to Sickonia (2 hyper jump units)
7,370,200 Sell 300 Medical Units
6,920,200 Buy 300 Heating Units
6,920,200 Jump from Sickonia to Hothor (2 hyper jump units)
9,350,200 Sell 300 Heating Units
9,149,200 Buy 300 Medical Units
9,149,200 Jump from Hothor to Sickonia (2 hyper jump units)
11,519,800 Sell 300 Medical Units
11,069,800 Buy 300 Heating Units
11,069,800 Jump from Sickonia to Hothor (2 hyper jump units)
13,499,800 Sell 300 Heating Units
13,298,800 Buy 300 Medical Units
13,298,800 Jump from Hothor to Sickonia (2 hyper jump units)
15,669,400 Sell 300 Medical Units
15,219,400 Buy 300 Heating Units
15,219,400 Jump from Sickonia to Hothor (2 hyper jump units)
17,649,400 Sell 300 Heating Units
' --funds 1000000 --start Earth
t '' --funds 1000000 --start Earth --fuel 0
t ' 850,000 Buy 300 Medical Units
850,000 Jump from Earth to HomeWorld (2 hyper jump units)
3,256,900 Sell 300 Medical Units
' --funds 1000000 --start Earth --fuel 2
t ' 850,000 Buy 300 Medical Units
850,000 Jump from Earth to Sickonia (2 hyper jump units)
3,220,600 Sell 300 Medical Units
2,770,600 Buy 300 Heating Units
2,770,600 Jump from Sickonia to Hothor (2 hyper jump units)
5,200,600 Sell 300 Heating Units
' --funds 1000000 --start Earth --fuel 4
t ' 850,000 Buy 300 Medical Units
850,000 Jump from Earth to Sickonia (2 hyper jump units)
3,220,600 Sell 300 Medical Units
2,770,600 Buy 300 Heating Units
2,770,600 Jump from Sickonia to Hothor (2 hyper jump units)
5,200,600 Sell 300 Heating Units
4,999,600 Buy 300 Medical Units
4,999,600 Jump from Hothor to HomeWorld (2 hyper jump units)
7,406,500 Sell 300 Medical Units
' --funds 1000000 --start Earth --fuel 6
t ' 3,900,000 Buy 300 AntiCloak Scanners
3,900,000 Jump from Metallica to Loony (2 hyper jump units)
32,790,000 Sell 300 AntiCloak Scanners
' --funds 30000000 --start Metallica --fuel 2
t ' 19,934,000 Buy 300 Ground Weapons
19,934,000 Jump from Metallica to Tribonia (2 hyper jump units)
21,779,300 Sell 300 Ground Weapons
' --funds 20000000 --start Metallica --fuel 2
t ' 850,000 Buy 300 Medical Units
850,000 Jump from Earth to Sickonia (2 hyper jump units)
3,220,600 Sell 300 Medical Units
2,770,600 Buy 300 Heating Units
2,770,600 Jump from Sickonia to Hothor (2 hyper jump units)
5,200,600 Sell 300 Heating Units
4,999,600 Buy 300 Medical Units
4,999,600 Jump from Hothor to HomeWorld (2 hyper jump units)
7,406,500 Sell 300 Medical Units
7,303,000 Buy 300 Novelty Packs
7,303,000 Jump from HomeWorld to Gojuon (2 hyper jump units)
8,486,500 Sell 300 Novelty Packs
'$'\r'' 863,700 Cost of --end Gojuon
' --funds 1000000 --start Earth --end Gojuon --fuel 8
t ' 996,500 Buy 7 Medical Units
996,500 Jump from Earth to Sickonia (2 hyper jump units)
1,051,814 Sell 7 Medical Units
1,041,314 Buy 7 Heating Units
1,041,314 Jump from Sickonia to Hothor (2 hyper jump units)
1,098,014 Sell 7 Heating Units
' --funds 1000000 --start Earth --fuel 4 --hold 7
t ' 1,121,500 Sell 300 Clothes Bundles
971,500 Buy 300 Medical Units
971,500 Jump from Earth to HomeWorld (2 hyper jump units)
3,378,400 Sell 300 Medical Units
' --funds 1000000 --start Earth --fuel 2 --start_hold 'Clothes Bundles'
t ' 1,000,000 Jump from Earth to Eden (1 hyper jump units)
950,000 Buy 2 Eden Warp Units
923,776 Buy 298 Tree Growth Kits
923,776 Eden warp from Eden to Zoolie
3,026,464 Sell 298 Tree Growth Kits
2,909,854 Buy 299 Medical Units
2,909,854 Eden warp from Zoolie to HomeWorld
5,308,731 Sell 299 Medical Units
5,308,731 Jump from HomeWorld to Eden (1 hyper jump units)
5,258,731 Buy 2 Eden Warp Units
5,232,507 Buy 298 Tree Growth Kits
5,232,507 Eden warp from Eden to Zoolie
7,335,195 Sell 298 Tree Growth Kits
7,218,585 Buy 299 Medical Units
7,218,585 Eden warp from Zoolie to HomeWorld
9,617,462 Sell 299 Medical Units
9,617,462 Jump from HomeWorld to Eden (1 hyper jump units)
9,567,462 Buy 2 Eden Warp Units
9,541,238 Buy 298 Tree Growth Kits
9,541,238 Eden warp from Eden to Zoolie
11,643,926 Sell 298 Tree Growth Kits
11,527,316 Buy 299 Medical Units
11,527,316 Eden warp from Zoolie to HomeWorld
13,926,193 Sell 299 Medical Units
' --funds 1000000 --start Earth --fuel 3 --flight_plan Eden,Eden,Eden
t ' 965,500 Buy 300 Medical Units
965,500 Jump from Medoca to Eden (1 hyper jump units)
1,100,500 Sell 300 Medical Units
1,050,500 Buy 2 Eden Warp Units
'$'\r''Use 1 extra edens, make an extra 2,256,084 ( 2,256,084 per eden)
'$'\r''Use 2 extra edens, make an extra 4,358,731 ( 2,179,365 per eden)
' --funds 1000000 --start Medoca --fuel 1 --flight_plan Eden --end_edens 2
t ' 965,500 Buy 300 Medical Units
965,500 Jump from Medoca to Eden (1 hyper jump units)
1,100,500 Sell 300 Medical Units
1,050,500 Buy 2 Eden Warp Units
' --funds 1000000 --start Medoca --fuel 1 --flight_plan Eden --end_edens 2 --extra_stats=false
t ' 907,322 Buy 298 Ground Weapons
907,322 Eden warp from Desha Rockna to Tribonia
2,740,320 Sell 298 Ground Weapons
2,558,528 Buy 299 Tree Growth Kits
2,558,528 Jump from Tribonia to Zoolie (1 hyper jump units)
4,668,272 Sell 299 Tree Growth Kits
4,551,662 Buy 299 Medical Units
4,551,662 Eden warp from Zoolie to Sickonia
6,914,360 Sell 299 Medical Units
6,464,360 Buy 300 Heating Units
6,464,360 Jump from Sickonia to Hothor (1 hyper jump units)
8,894,360 Sell 300 Heating Units
' --funds 1000000 --start 'Desha Rockna' --start_edens 2 --fuel 2 --flight_plan Zoolie,Hothor
t ' 550,000 Buy 300 Heating Units
550,000 Jump from Dune to Hothor (2 hyper jump units)
2,980,000 Sell 300 Heating Units
2,974,400 Buy a Cloak
2,774,070 Buy 299 Medical Units
2,774,070 Jump from Hothor to HomeWorld (2 hyper jump units)
5,172,947 Sell 299 Medical Units
' --funds 1000000 --start Dune --fuel 4 --cloak
t ' 173,900,000 Buy 300 AntiCloak Scanners
173,900,000 Jump from Metallica to Loony (2 hyper jump units)
202,790,000 Sell 300 AntiCloak Scanners
201,965,000 Buy 300 Device Of Cloakings
201,965,000 Jump from Loony to WeaponWorld (2 hyper jump units)
76,335,368 Buy 654321 Fighter Drones
77,658,368 Sell 300 Device Of Cloakings
'$'\r''Drones were 194.26 each
' --funds 200000000 --start Metallica --fuel 4 --drones 654321
t ' 29,786,400 Buy 300 Tree Growth Kits
29,786,400 Jump from Gojuon to Metallica (2 hyper jump units)
23,913,582 Buy 87654 Shield Batterys
25,157,682 Sell 300 Tree Growth Kits
'$'\r''Batteries were 77.88 each
' --funds 30000000 --start Gojuon --fuel 3 --batteries 87654
t ' 29,908,800 Buy 300 Ground Weapons
29,908,800 Jump from Gojuon to Tribonia (2 hyper jump units)
31,754,100 Sell 300 Ground Weapons
31,571,700 Buy 300 Tree Growth Kits
31,571,700 Jump from Tribonia to Metallica (2 hyper jump units)
25,698,882 Buy 87654 Shield Batterys
26,942,982 Sell 300 Tree Growth Kits
'$'\r''Batteries were 82.71 each
' --funds 30000000 --start Gojuon --fuel 4 --batteries 87654
t ' 27,808,650 Buy 87654 Shield Batterys
27,358,650 Buy 300 Heating Units
27,358,650 Jump from Dune to Hothor (2 hyper jump units)
29,788,650 Sell 300 Heating Units
29,587,650 Buy 300 Medical Units
29,587,650 Jump from Hothor to HomeWorld (2 hyper jump units)
31,994,550 Sell 300 Medical Units
'$'\r''Batteries were 25.00 each
' --funds 30000000 --start Dune --fuel 4 --batteries 87654 --battery_price 18
t ' 29,472,000 Buy 300 Jewels
29,472,000 Jump from Dune to WeaponWorld (2 hyper jump units)
27,806,574 Buy 87654 Shield Batterys
29,356,374 Sell 300 Jewels
2,956,374 Buy 300 AntiCloak Scanners
2,956,374 Jump from WeaponWorld to Loony (2 hyper jump units)
31,846,374 Sell 300 AntiCloak Scanners
'$'\r''Batteries were 26.69 each
' --funds 30000000 --start Dune --fuel 4 --batteries 87654 --battery_price 19
t ' 29,472,000 Buy 300 Jewels
29,472,000 Jump from Dune to WeaponWorld (2 hyper jump units)
27,368,304 Buy 87654 Shield Batterys
28,918,104 Sell 300 Jewels
2,518,104 Buy 300 AntiCloak Scanners
2,518,104 Jump from WeaponWorld to Loony (2 hyper jump units)
31,408,104 Sell 300 AntiCloak Scanners
'$'\r''Batteries were 31.69 each
' --funds 30000000 --start Dune --fuel 4 --batteries 87654 --battery_price 24
t ' 30,000,000 Jump from Dune to Metallica (2 hyper jump units)
28,597,536 Buy 87654 Shield Batterys
2,497,536 Buy 300 AntiCloak Scanners
2,497,536 Jump from Metallica to Loony (2 hyper jump units)
31,387,536 Sell 300 AntiCloak Scanners
'$'\r''Batteries were 31.93 each
' --funds 30000000 --start Dune --fuel 4 --batteries 87654 --battery_price 25
t ' 30,000,000 Jump from Dune to Metallica (2 hyper jump units)
26,055,570 Buy 87654 Shield Batterys
42,570 Buy 299 AntiCloak Scanners
42,570 Jump from Metallica to Loony (2 hyper jump units)
28,836,270 Sell 299 AntiCloak Scanners
'$'\r''Batteries were 61.03 each
' --funds 30000000 --start Dune --fuel 4 --batteries 87654 --battery_price 68
t ' 29,871,900 Buy 300 Ground Weapons
29,871,900 Jump from Dune to Tribonia (2 hyper jump units)
31,717,200 Sell 300 Ground Weapons
31,534,800 Buy 300 Tree Growth Kits
31,534,800 Jump from Tribonia to Metallica (2 hyper jump units)
27,502,716 Buy 87654 Shield Batterys
28,746,816 Sell 300 Tree Growth Kits
'$'\r''Batteries were 62.05 each
' --funds 30000000 --start Dune --fuel 4 --batteries 87654 --battery_price 69
t ' 137,826,324 Buy 57423 Fighter Drones
111,126,324 Buy 300 AntiCloak Scanners
111,126,324 Jump from Norhaven to Loony (2 hyper jump units)
140,016,324 Sell 300 AntiCloak Scanners
139,566,324 Buy 300 Heating Units
139,566,324 Jump from Loony to Hothor (2 hyper jump units)
141,996,324 Sell 300 Heating Units
'$'\r''Drones were 212.00 each
' --funds 150000000 --start Norhaven --fuel 4 --drones 57423
t ' 149,802,000 Buy 300 Medical Units
149,802,000 Jump from Norhaven to HomeWorld (2 hyper jump units)
152,208,900 Sell 300 Medical Units
151,470,900 Buy 300 Jewels
151,470,900 Jump from HomeWorld to WeaponWorld (2 hyper jump units)
140,445,492 Buy 57424 Fighter Drones
141,995,292 Sell 300 Jewels
'$'\r''Drones were 212.01 each
' --funds 150000000 --start Norhaven --fuel 4 --drones 57424
t ' 137,306,550 Buy 60445 Fighter Drones
110,606,550 Buy 300 AntiCloak Scanners
110,606,550 Jump from Norhaven to Loony (2 hyper jump units)
139,496,550 Sell 300 AntiCloak Scanners
139,046,550 Buy 300 Heating Units
139,046,550 Jump from Loony to Hothor (2 hyper jump units)
141,476,550 Sell 300 Heating Units
'$'\r''Drones were 210.00 each
' --funds 150000000 --start Norhaven --fuel 4 --drones 60445 --drone_price 199
t ' 149,802,000 Buy 300 Medical Units
149,802,000 Jump from Norhaven to HomeWorld (2 hyper jump units)
152,208,900 Sell 300 Medical Units
151,470,900 Buy 300 Jewels
151,470,900 Jump from HomeWorld to WeaponWorld (2 hyper jump units)
139,925,714 Buy 60446 Fighter Drones
141,475,514 Sell 300 Jewels
'$'\r''Drones were 210.01 each
' --funds 150000000 --start Norhaven --fuel 4 --drones 60446 --drone_price 199
t ' 98,500,000 Buy 300 Device Of Cloakings
98,500,000 Jump from Earth to Volcana (1 hyper jump units)
101,024,500 Sell 300 Device Of Cloakings
100,943,500 Buy 300 Heating Units
100,943,500 Jump from Volcana to Richiana (1 hyper jump units)
103,227,700 Sell 300 Heating Units
103,227,700 Jump from Richiana to Eden (1 hyper jump units)
103,177,700 Buy 2 Eden Warp Units
103,028,700 Buy 298 Medical Units
103,028,700 Eden warp from Eden to HomeWorld
105,419,554 Sell 298 Medical Units
105,383,973 Buy 299 Ground Weapons
105,383,973 Jump from HomeWorld to Tribonia (1 hyper jump units)
107,223,122 Sell 299 Ground Weapons
107,041,330 Buy 299 Tree Growth Kits
107,041,330 Jump from Tribonia to Medoca (1 hyper jump units)
108,046,568 Sell 299 Tree Growth Kits
108,012,183 Buy 299 Medical Units
108,012,183 Jump from Medoca to Sickonia (1 hyper jump units)
110,374,881 Sell 299 Medical Units
109,926,381 Buy 299 Heating Units
109,926,381 Eden warp from Sickonia to Hothor
112,348,281 Sell 299 Heating Units
112,147,281 Buy 300 Medical Units
112,147,281 Jump from Hothor to Sickonia (2 hyper jump units)
114,517,881 Sell 300 Medical Units
114,067,881 Buy 300 Heating Units
114,067,881 Jump from Sickonia to Hothor (2 hyper jump units)
116,497,881 Sell 300 Heating Units
116,296,881 Buy 300 Medical Units
116,296,881 Jump from Hothor to Sickonia (2 hyper jump units)
118,667,481 Sell 300 Medical Units
118,217,481 Buy 300 Heating Units
118,217,481 Jump from Sickonia to Hothor (2 hyper jump units)
120,647,481 Sell 300 Heating Units
120,446,481 Buy 300 Medical Units
120,446,481 Jump from Hothor to HomeWorld (2 hyper jump units)
122,853,381 Sell 300 Medical Units
' --funds 100000000 --start Earth --flight_plan Volcana,Richiana,Eden,Tribonia,Medoca,Sickonia,Earth,Schooloria
t ' 99,550,000 Buy 300 Heating Units
99,550,000 Jump from Earth to Richiana (1 hyper jump units)
101,834,200 Sell 300 Heating Units
101,451,100 Buy 300 Novelty Packs
101,451,100 Jump from Richiana to Tribonia (1 hyper jump units)
102,999,700 Sell 300 Novelty Packs
102,719,200 Buy 300 Medical Units
102,719,200 Jump from Tribonia to Sickonia (1 hyper jump units)
105,089,800 Sell 300 Medical Units
104,639,800 Buy 300 Heating Units
104,639,800 Jump from Sickonia to Hothor (2 hyper jump units)
107,069,800 Sell 300 Heating Units
106,868,800 Buy 300 Medical Units
106,868,800 Jump from Hothor to Loony (1 hyper jump units)
108,384,700 Sell 300 Medical Units
108,294,400 Buy 300 Clothes Bundles
108,294,400 Jump from Loony to Dreamora (1 hyper jump units)
109,397,500 Sell 300 Clothes Bundles
109,267,000 Buy 300 Medical Units
109,267,000 Jump from Dreamora to Eden (1 hyper jump units)
109,402,000 Sell 300 Medical Units
109,352,000 Buy 2 Eden Warp Units
109,325,776 Buy 298 Tree Growth Kits
109,325,776 Eden warp from Eden to Zoolie
111,428,464 Sell 298 Tree Growth Kits
111,311,854 Buy 299 Medical Units
111,311,854 Eden warp from Zoolie to Sickonia
113,674,552 Sell 299 Medical Units
113,224,552 Buy 300 Heating Units
113,224,552 Jump from Sickonia to Hothor (2 hyper jump units)
115,654,552 Sell 300 Heating Units
115,453,552 Buy 300 Medical Units
115,453,552 Jump from Hothor to Sickonia (2 hyper jump units)
117,824,152 Sell 300 Medical Units
117,374,152 Buy 300 Heating Units
117,374,152 Jump from Sickonia to Hothor (2 hyper jump units)
119,804,152 Sell 300 Heating Units
119,603,152 Buy 300 Medical Units
119,603,152 Jump from Hothor to HomeWorld (2 hyper jump units)
122,010,052 Sell 300 Medical Units
' --funds 100000000 --start Earth --flight_plan Richiana,Tribonia,Sickonia,Uniland,StockWorld,Loony,Dreamora,Eden
t ' 99,895,000 Buy 300 Ground Weapons
99,895,000 Jump from Earth to Tribonia (1 hyper jump units)
101,740,300 Sell 300 Ground Weapons
101,557,900 Buy 300 Tree Growth Kits
101,557,900 Jump from Tribonia to Dune (1 hyper jump units)
103,480,300 Sell 300 Tree Growth Kits
103,480,300 Jump from Dune to Eden (1 hyper jump units)
103,430,300 Buy 2 Eden Warp Units
103,404,076 Buy 298 Tree Growth Kits
103,404,076 Jump from Eden to Medoca (1 hyper jump units)
104,405,952 Sell 298 Tree Growth Kits
104,371,682 Buy 298 Medical Units
104,371,682 Jump from Medoca to HomeWorld (1 hyper jump units)
106,762,536 Sell 298 Medical Units
106,519,964 Buy 298 Plastic Trinkets
106,519,964 Jump from HomeWorld to Baboria (1 hyper jump units)
108,346,406 Sell 298 Plastic Trinkets
108,197,406 Buy 298 Medical Units
108,197,406 Eden warp from Baboria to HomeWorld
110,588,260 Sell 298 Medical Units
110,422,913 Buy 299 Clothes Bundles
110,422,913 Jump from HomeWorld to Dreamora (1 hyper jump units)
111,522,336 Sell 299 Clothes Bundles
111,392,271 Buy 299 Medical Units
111,392,271 Eden warp from Dreamora to Sickonia
113,754,969 Sell 299 Medical Units
113,304,969 Buy 300 Heating Units
113,304,969 Jump from Sickonia to Hothor (1 hyper jump units)
115,734,969 Sell 300 Heating Units
115,533,969 Buy 300 Medical Units
115,533,969 Jump from Hothor to Sickonia (2 hyper jump units)
117,904,569 Sell 300 Medical Units
117,454,569 Buy 300 Heating Units
117,454,569 Jump from Sickonia to Hothor (2 hyper jump units)
119,884,569 Sell 300 Heating Units
119,683,569 Buy 300 Medical Units
119,683,569 Jump from Hothor to Sickonia (2 hyper jump units)
122,054,169 Sell 300 Medical Units
121,604,169 Buy 300 Heating Units
121,604,169 Jump from Sickonia to Hothor (2 hyper jump units)
124,034,169 Sell 300 Heating Units
' --funds 100000000 --start Earth --flight_plan Tribonia,Dune,Eden,Medoca,HomeWorld,Baboria,Dreamora,Hothor
t ' 99,550,000 Buy 300 Heating Units
99,550,000 Jump from Earth to Hothor (2 hyper jump units)
101,980,000 Sell 300 Heating Units
101,779,000 Buy 300 Medical Units
101,779,000 Jump from Hothor to HomeWorld (1 hyper jump units)
104,185,900 Sell 300 Medical Units
103,941,700 Buy 300 Plastic Trinkets
103,941,700 Jump from HomeWorld to Baboria (1 hyper jump units)
105,780,400 Sell 300 Plastic Trinkets
105,630,400 Buy 300 Medical Units
105,630,400 Jump from Baboria to Sickonia (2 hyper jump units)
108,001,000 Sell 300 Medical Units
107,551,000 Buy 300 Heating Units
107,551,000 Jump from Sickonia to Hothor (2 hyper jump units)
109,981,000 Sell 300 Heating Units
109,780,000 Buy 300 Medical Units
109,780,000 Jump from Hothor to Sickonia (2 hyper jump units)
112,150,600 Sell 300 Medical Units
111,700,600 Buy 300 Heating Units
111,700,600 Jump from Sickonia to Hothor (2 hyper jump units)
114,130,600 Sell 300 Heating Units
113,929,600 Buy 300 Medical Units
113,929,600 Jump from Hothor to Sickonia (2 hyper jump units)
116,300,200 Sell 300 Medical Units
115,850,200 Buy 300 Heating Units
115,850,200 Jump from Sickonia to Hothor (2 hyper jump units)
118,280,200 Sell 300 Heating Units
' --funds 100000000 --start Earth --flight_plan 'HugeLind Mar,Dreamora,HomeWorld,Baboria,Dogafetch,StockWorld,Volcana,Gojuon'
t ' 99,850,000 Buy 300 Medical Units
99,850,000 Jump from Earth to Sickonia (2 hyper jump units)
102,220,600 Sell 300 Medical Units
101,770,600 Buy 300 Heating Units
101,770,600 Jump from Sickonia to Hothor (2 hyper jump units)
104,200,600 Sell 300 Heating Units
103,999,600 Buy 300 Medical Units
103,999,600 Jump from Hothor to Sickonia (2 hyper jump units)
106,370,200 Sell 300 Medical Units
105,920,200 Buy 300 Heating Units
105,920,200 Jump from Sickonia to Hothor (2 hyper jump units)
108,350,200 Sell 300 Heating Units
108,149,200 Buy 300 Medical Units
108,149,200 Jump from Hothor to Sickonia (2 hyper jump units)
110,519,800 Sell 300 Medical Units
110,069,800 Buy 300 Heating Units
110,069,800 Jump from Sickonia to Hothor (2 hyper jump units)
112,499,800 Sell 300 Heating Units
112,298,800 Buy 300 Medical Units
112,298,800 Jump from Hothor to Sickonia (2 hyper jump units)
114,669,400 Sell 300 Medical Units
114,219,400 Buy 300 Heating Units
114,219,400 Jump from Sickonia to Hothor (2 hyper jump units)
116,649,400 Sell 300 Heating Units
' --funds 100000000 --start Earth --flight_plan 'StockWorld,Norhaven,Metallica,Plague,WeaponWorld,Dogafetch,Baboria,Uniland'
t ' 850,000 Buy 300 Medical Units
850,000 Jump from Earth to HomeWorld (2 hyper jump units)
3,256,900 Sell 300 Medical Units
2,518,900 Buy 300 Jewels
2,518,900 Jump from HomeWorld to WeaponWorld (2 hyper jump units)
4,068,700 Sell 300 Jewels
28 Buy 21191 Fighter Drones
'$'\r''Drones were 245.41 each
' --funds 1000000 --start Earth --fuel 4 --drones 21191
# Negative test: one more drone than the funds allow must make the planner
# fail. Capture its stderr and require the expected error message.
# (exit_status is accumulated by the earlier checks in this file.)
impossible="$(mktemp)"
t '' --funds 1000000 --start Earth --fuel 4 --drones 21192 2> "$impossible"
# NOTE(review): 'acheive' is a typo but must match the program's actual
# output -- do not fix it here without changing the program too.
grep -q 'Cannot acheive success criteria' "$impossible" || exit_status=1
rm "$impossible"
exit "$exit_status"
|
chkno/planeteer
|
test.sh
|
Shell
|
agpl-3.0
| 20,881 |
#!/bin/bash
# Travis CI dependency setup: builds and installs libstorj (plus its build
# prerequisites) on either the macOS or the Linux worker image.
if [[ $TRAVIS_OS_NAME == 'osx' ]]; then
# HOMEBREW_NO_AUTO_UPDATE=1 skips the slow implicit 'brew update'.
HOMEBREW_NO_AUTO_UPDATE=1 brew install curl nettle libmicrohttpd libuv
git clone https://github.com/Storj/libstorj.git
cd libstorj
./autogen.sh
./configure
sudo make install
else
# Add xenial package sources so the required library versions are
# available (NOTE(review): presumably the default image is older -- confirm).
echo "deb http://us.archive.ubuntu.com/ubuntu/ xenial main" | sudo tee -a /etc/apt/sources.list
echo "deb http://us.archive.ubuntu.com/ubuntu/ xenial universe" | sudo tee -a /etc/apt/sources.list
echo "deb http://us.archive.ubuntu.com/ubuntu/ xenial-updates universe" | sudo tee -a /etc/apt/sources.list
echo "deb http://us.archive.ubuntu.com/ubuntu/ xenial-updates multiverse" | sudo tee -a /etc/apt/sources.list
echo "deb http://security.ubuntu.com/ubuntu xenial-security main" | sudo tee -a /etc/apt/sources.list
echo "deb http://security.ubuntu.com/ubuntu xenial-security multiverse" | sudo tee -a /etc/apt/sources.list
sudo apt-get update -qq
sudo apt-get install build-essential libtool autotools-dev automake libmicrohttpd-dev
sudo apt-get install libcurl4-gnutls-dev nettle-dev libjson-c-dev libuv1-dev
git clone https://github.com/Storj/libstorj.git
cd libstorj
./autogen.sh
# Install under /usr so the library is found without extra linker config.
./configure --prefix=/usr
sudo make install
fi
|
Storj/node-libstorj
|
.travis.sh
|
Shell
|
lgpl-2.1
| 1,243 |
#!/bin/bash
# Out-of-source CMake build of the Qt wrapper, into cpp/dist.
set -o errexit
cd cpp
# -p: succeed even if dist already exists (safe to re-run).
mkdir -p dist
cd dist
cmake ..
# Build with five parallel jobs.
make -j5
cd ../..
|
nikki-and-the-robots/nikki
|
src/build-qtwrapper.sh
|
Shell
|
lgpl-3.0
| 85 |
# Echo each command as it runs.
set -x
# Debug build: no optimisation, debug symbols included.
g++ -std=c++11 main.cpp -o cpusage-DBG -O0 -g -Wall
# Release build: optimised (-O3) and stripped (-s).
g++ -std=c++11 main.cpp -o cpusage -O3 -s -Wall
|
vivaladav/BitsOfBytes
|
cpp-program-to-get-cpu-usage-from-command-line-in-linux/build.sh
|
Shell
|
unlicense
| 107 |
#!/bin/sh
# Mirror the latest F-Droid repository (APKs, icons, sources, index files)
# into the current directory. Run from the desired target folder.
echo "Make sure you are in desired folder (e.g. ~/f-droid.org/) prior to running this script."
echo "You may also want to modify 'repo_url' on fdroid_backup_aio.py if you wish to mirror a site besides f-droid.org"
echo "Downloading latest F-Droid files to current directory..."
# Generates the download_*.txt lists plus index.xml, index.jar,
# categories.txt and latestapps.dat used below.
python fdroid_backup_aio.py

# Fix: every command here runs in the foreground, so the original 'wait'
# calls were no-ops and have been removed; 'mkdir -p' makes re-runs safe
# where plain 'mkdir' used to fail on an existing directory.
mkdir -p repo
cd repo/
# -nc: skip files already present; --content-disposition/--trust-server-names:
# honour the filenames the server supplies.
wget -nc --content-disposition --trust-server-names -i ../download_apk.txt
mkdir -p icons
cd icons
wget -nc --content-disposition --trust-server-names -i ../../download_icons.txt
cd ../..
mkdir -p sources
cd sources/
wget -nc --content-disposition --trust-server-names -i ../download_sources.txt
cd ../
# Move the repository metadata next to the downloaded APKs.
mv -t repo/ index.xml index.jar categories.txt latestapps.dat
## Cleanup working directory.
rm download_apk.txt apk.txt download_icons.txt icons.txt download_sources.txt
echo "Downloads finished, you should have a working backup of the f-droid mirror"
|
g4jc/fdroid-backup
|
fdroid_fresh.sh
|
Shell
|
unlicense
| 941 |
#!/bin/bash
# Give the desktop session time to finish loading before launching conky.
sleep 20 &&
conky
|
cybert79/HaXor
|
dot-files/conkyscript.sh
|
Shell
|
unlicense
| 31 |
#!/bin/sh
#
# git_checkout.sh
#
# part of pfSense (https://www.pfsense.org)
# Copyright (c) 2004-2018 Rubicon Communications, LLC (Netgate)
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Clone a git repository, or refresh an existing clone in place
# (discarding local changes) and check out the requested branch/tag.

export PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin

scripts_path=$(dirname $(realpath $0))

# common.subr provides the err() and run() helpers used below.
if [ ! -f "${scripts_path}/common.subr" ]; then
	echo >&2 "ERROR: common.subr is missing"
	exit 1
fi
. ${scripts_path}/common.subr

usage() {
	cat >&2 <<END
Usage: $(basename $0) -r repo_url -d destdir [-b branch] [-h]
Options:
	-r repo_url -- URL of desired git repository
	-d destdir -- Directory to clone
	-b branch -- Branch or tag to clone (default: master)
	-h -- Show this help and exit
Environment:
	GIT_BIN -- Path to git binary
END
	exit 1
}

branch="master"
while getopts r:d:b:h opt; do
	case "$opt" in
	r)
		repo_url=$OPTARG
		;;
	d)
		destdir=$OPTARG
		;;
	b)
		branch=$OPTARG
		;;
	*)
		usage
		;;
	esac
done

[ -z "$repo_url" ] \
	&& err "repository URL is not defined"
[ -z "$destdir" ] \
	&& err "destdir is not defined"
[ -e "$destdir" ] && [ ! -d "$destdir" ] \
	&& err "destdir already exists and is not a directory"

# Honor GIT_BIN when set; 'command -v' is the portable lookup.
git=${GIT_BIN:-$(command -v git)}
if [ ! -x "${git}" ]; then
	err "git binary is missing"
fi

if [ -d "${destdir}/.git" ]; then
	current_url=$(${git} -C ${destdir} config --get remote.origin.url)
	[ "${current_url}" != "${repo_url}" ] \
		&& err \
		    "destination directory contains a different git repository"

	run "Removing local changes from git repo ${repo_url} (${branch})" \
		"${git} -C ${destdir} reset -q --hard"
	run "Removing leftovers from git repo ${repo_url} (${branch})" \
		"${git} -C ${destdir} clean -qfd"
	run "Retrieving updates from git repo ${repo_url} (${branch})" \
		"${git} -C ${destdir} fetch -q origin"
	# Fix: use ${git} (which honors GIT_BIN) consistently -- the original
	# called bare 'git' for checkout/rebase/clone, bypassing GIT_BIN.
	run "Updating git repo ${repo_url} (${branch})" \
		"${git} -C ${destdir} checkout -q ${branch}"

	# Detect if it's a branch and rebase it
	if ${git} -C ${destdir} show-ref -q --verify refs/heads/${branch}; then
		run "Rebasing git repo ${repo_url} (${branch})" \
			"${git} -C ${destdir} rebase -q origin/${branch}"
	fi
else
	run "Cloning git repository ${repo_url} (${branch})" \
		"${git} clone -q -b ${branch} ${repo_url} ${destdir}"
fi

exit 0
|
ptorsten/pfsense
|
build/scripts/git_checkout.sh
|
Shell
|
apache-2.0
| 2,752 |
#!/bin/bash -e
# Print per-labeler confusion-matrix counts (tp/fp/fn/tn) and
# precision/recall for genepheno causation holdout labels.
# Optional $1 restricts the stats to a single label version.
set -beEu -o pipefail
echo "CREATE HOLDOUT PATCH!"
# With one argument, filter every sub-query by that label version.
if [ $# -eq 1 ]
then
version_string="AND version = $1"
else
version_string=""
fi
# NOTE(review): GP_CUTOFF is read but never used below -- confirm intent.
GP_CUTOFF=`cat ../results_log/gp_cutoff`
cd ..
source env_local.sh
deepdive sql """
SELECT
labeler,
true_pos,
true_neg,
false_pos,
false_neg,
(true_pos::float / CASE WHEN (true_pos::float + false_pos::float) <> 0 THEN (true_pos::float + false_pos::float) ELSE 1 END) as precision,
(true_pos::float / CASE WHEN (true_pos::float + false_neg::float) <> 0 THEN (true_pos::float + false_neg::float) ELSE 1 END) as recall
FROM (
SELECT
COALESCE(fp.labeler, tp.labeler, fn.labeler, tn.labeler) labeler,
CASE WHEN tp.tp IS NULL THEN 0 ELSE tp.tp END true_pos,
CASE WHEN fp.fp IS NULL THEN 0 ELSE fp.fp END false_pos,
CASE WHEN fn.fn IS NULL THEN 0 ELSE fn.fn END false_neg,
CASE WHEN tn.tn IS NULL THEN 0 ELSE tn.tn END true_neg
FROM
(SELECT
labeler,
COUNT(DISTINCT s.relation_id) fp
FROM
genepheno_causation_no_charite gc
RIGHT JOIN genepheno_causation_labels s
ON (s.relation_id = gc.relation_id)
WHERE
gc.is_correct = 't'
AND gc.supertype NOT LIKE '%CHARITE%'
AND s.is_correct = 'f'
$version_string
GROUP BY labeler) fp
FULL OUTER JOIN
(SELECT
labeler,
COUNT(DISTINCT s.relation_id) tp
FROM
genepheno_causation_no_charite gc
RIGHT JOIN genepheno_causation_labels s
ON (s.relation_id = gc.relation_id)
WHERE
gc.is_correct = 't'
AND s.is_correct = 't'
$version_string
GROUP BY labeler) tp
ON (fp.labeler = tp.labeler)
FULL OUTER JOIN
(SELECT
labeler,
COUNT(DISTINCT s.relation_id) fn
FROM
genepheno_causation_no_charite gc
RIGHT JOIN genepheno_causation_labels s
ON (s.relation_id = gc.relation_id)
WHERE
(gc.is_correct != 't' or gc.is_correct is null)
AND s.is_correct = 't'
$version_string
GROUP BY labeler) fn
ON (fn.labeler = COALESCE(fp.labeler, tp.labeler))
FULL OUTER JOIN
(SELECT
labeler,
COUNT(DISTINCT s.relation_id) tn
FROM
genepheno_causation_no_charite gc
RIGHT JOIN genepheno_causation_labels s
ON (s.relation_id = gc.relation_id)
WHERE
(gc.is_correct != 't' or gc.is_correct is null)
AND s.is_correct = 'f'
$version_string
GROUP BY labeler) tn
ON (tn.labeler = COALESCE(fp.labeler, tp.labeler, fn.labeler))) a
ORDER BY labeler;
"""
|
HazyResearch/dd-genomics
|
util/gp_raw_stats.sh
|
Shell
|
apache-2.0
| 2,413 |
#!/usr/bin/env bash
# Verify that the build rule preserved this script's shebang: the first
# line of the produced artefact must still be the bash env shebang.
# Fixes: quote "$0" (paths with spaces) and use $() over backticks.
if [ "$(head -1 "$0")" != "#!/usr/bin/env bash" ]; then
  echo "Shebang of the original file isn't the first line of the produced artefact" >&2
  exit 1
fi
|
thought-machine/please
|
test/sh_rules/shebang.sh
|
Shell
|
apache-2.0
| 179 |
#!/bin/bash
# Run the Cassandra storage integration test against a throw-away
# Cassandra container, applying the requested schema version first.

set -uxf -o pipefail

usage() {
  echo $"Usage: $0 <cassandra_version> <schema_version>"
  exit 1
}

# Require exactly two positional arguments.
check_arg() {
  if [ ! $# -eq 2 ]; then
    echo "ERROR: need exactly two arguments, <cassandra_version> <schema_version>"
    usage
  fi
}

# Start a disposable Cassandra container; prints its container id.
setup_cassandra() {
  local tag=$1
  local image=cassandra
  local params=(
    --rm
    --detach
    --publish 9042:9042
    --publish 9160:9160
  )
  local cid
  cid=$(docker run "${params[@]}" "${image}:${tag}")
  echo "${cid}"
}

# Kill the container and exit with the recorded test status.
teardown_cassandra() {
  local cid=$1
  docker kill "${cid}"
  exit "${exit_status}"
}

# Build the schema image and run it against the local Cassandra.
apply_schema() {
  local image=cassandra-schema
  local schema_dir=plugin/storage/cassandra/
  local schema_version=$1
  local params=(
    --rm
    --env CQLSH_HOST=localhost
    --env CQLSH_PORT=9042
    --env "TEMPLATE=/cassandra-schema/${schema_version}.cql.tmpl"
    --network host
  )
  docker build -t ${image} ${schema_dir}
  docker run "${params[@]}" ${image}
}

run_integration_test() {
  local version=$1
  local schema_version=$2
  local cid
  cid=$(setup_cassandra "${version}")
  # Fix: register cleanup as soon as the container exists -- the original
  # installed the EXIT trap only after the test ran, leaking the container
  # when schema application or the test aborted. exit_status must be
  # initialised first because the script runs under 'set -u'.
  exit_status=0
  trap 'teardown_cassandra ${cid}' EXIT
  apply_schema "${schema_version}"
  STORAGE=cassandra make storage-integration-test
  exit_status=$?
}

main() {
  check_arg "$@"
  echo "Executing integration test for $1 with schema $2.cql.tmpl"
  run_integration_test "$1" "$2"
}

main "$@"
|
jaegertracing/jaeger
|
scripts/cassandra-integration-test.sh
|
Shell
|
apache-2.0
| 1,313 |
#!/bin/bash
#
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Grant the solution's service accounts the BigQuery roles they need on
# every data project passed as a CLI argument.
# NOTE(review): assumes SA_TAGGING_DISPATCHER_EMAIL and SA_TAGGER_EMAIL are
# exported by the calling environment -- confirm against the deploy scripts.
for project in "$@"
do
echo "Preparing data project ${project} .."
# Tagging Dispatcher needs to know the location of datasets
gcloud projects add-iam-policy-binding "${project}" \
--member="serviceAccount:${SA_TAGGING_DISPATCHER_EMAIL}" \
--role="roles/bigquery.metadataViewer"
# Tagger needs to read table schema and update tables policy tags
gcloud projects add-iam-policy-binding "${project}" \
--member="serviceAccount:${SA_TAGGER_EMAIL}" \
--role="roles/bigquery.dataOwner"
done
|
GoogleCloudPlatform/bq-pii-classifier
|
scripts/prepare_data_projects_for_auto_dlp_mode.sh
|
Shell
|
apache-2.0
| 1,116 |
#!/bin/bash
# Container entry point for the faucet test suite: installs Python deps,
# starts OVS and docker, then runs unit tests, code checks and the
# mininet-based system tests.
UNITTESTS=1
DEPCHECK=1
MINCOVERAGE=85
set -e # quit on error
# allow user to skip parts of docker test
# this wrapper script only cares about -n, -u, -i, others passed to test suite.
# NOTE(review): options are parsed from $FAUCET_TESTS, not from this
# script's own arguments.
while getopts "cdijknsux" o $FAUCET_TESTS; do
case "${o}" in
i)
# run only integration tests
UNITTESTS=0
DEPCHECK=0
;;
n)
# skip code check
DEPCHECK=0
;;
u)
# skip unit tests
UNITTESTS=0
;;
*)
;;
esac
done
cd /faucet-src
if [ -d /var/tmp/pip-cache ] ; then
echo Using pip cache
cp -r /var/tmp/pip-cache /var/tmp/pip-cache-local
fi
./docker/pip_deps.sh "--cache-dir=/var/tmp/pip-cache-local"
./docker/workarounds.sh
echo "========== checking IPv4/v6 localhost is up ====="
ping6 -c 1 ::1
ping -c 1 127.0.0.1
echo "========== Starting OVS ========================="
export OVS_LOGDIR=/usr/local/var/log/openvswitch
/usr/local/share/openvswitch/scripts/ovs-ctl start
ovs-vsctl show
ovs-vsctl --no-wait set Open_vSwitch . other_config:max-idle=50000
# Needed to support double tagging.
ovs-vsctl --no-wait set Open_vSwitch . other_config:vlan-limit=2
cd /faucet-src/tests
# Best-effort sysctl tuning; failures are tolerated (|| true).
./sysctls_for_tests.sh || true
# TODO: need to force UTF-8 as POSIX causes python3/pytype errors.
locale-gen en_US.UTF-8
export LANG=en_US.UTF-8
export LANGUAGE=en_US.en
export LC_ALL=en_US.UTF-8
export PYTHONPATH=/faucet-src:/faucet-src/faucet:/faucet-src/clib
if [ "$UNITTESTS" == 1 ] ; then
echo "========== Running faucet unit tests =========="
cd /faucet-src/tests
time ./run_unit_tests.sh
fi
if [ "$DEPCHECK" == 1 ] ; then
echo "========== Building documentation =========="
cd /faucet-src/docs
time make html
rm -rf _build
cd /faucet-src/tests/codecheck
echo "============ Running pylint analyzer ============"
time ./pylint.sh
echo "============ Running pytype analyzer ============"
time ./pytype.sh
fi
echo "========== Starting docker container =========="
service docker start || true
echo "========== Running faucet system tests =========="
# Collect failed suites instead of aborting, so both suites always run.
test_failures=
export FAUCET_DIR=/faucet-src/faucet
export http_proxy=
cd /faucet-src/tests/integration
./mininet_main.py -c
time ./mininet_main.py $FAUCET_TESTS || test_failures+=" mininet_main"
cd /faucet-src/clib
time ./clib_mininet_test.py $FAUCET_TESTS || test_failures+=" clib_mininet_test"
if [ -n "$test_failures" ]; then
echo Test failures: $test_failures
exit 1
fi
echo Done with faucet system tests.
|
trentindav/faucet
|
docker/runtests.sh
|
Shell
|
apache-2.0
| 2,579 |
#!/usr/bin/env bash
# Launch training of an I3D (InceptionV1 backbone) action-recognition model
# on Kinetics-400 across 8 GPUs. Data paths assume the standard GluonCV
# Kinetics-400 layout under /home/ubuntu/data.
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python ./scripts/action-recognition/train_recognizer.py \
--dataset kinetics400 \
--data-dir /home/ubuntu/data/kinetics400/train_256 \
--val-data-dir /home/ubuntu/data/kinetics400/val_256 \
--train-list /home/ubuntu/data/kinetics400/k400_train_240618.txt \
--val-list /home/ubuntu/data/kinetics400/k400_val_19761_cleanv3.txt \
--mode hybrid \
--dtype float32 \
--prefetch-ratio 1.0 \
--video-loader \
--use-decord \
--model i3d_inceptionv1_kinetics400 \
--num-classes 400 \
--batch-size 8 \
--num-gpus 8 \
--num-data-workers 32 \
--input-size 224 \
--new-height 256 \
--new-width 340 \
--new-length 32 \
--new-step 2 \
--lr-mode step \
--lr 0.01 \
--momentum 0.9 \
--wd 0.0001 \
--lr-decay 0.1 \
--lr-decay-epoch 40,80,100 \
--num-epochs 100 \
--scale-ratios 1.0,0.8 \
--save-frequency 20 \
--log-interval 50 \
--logging-file i3d_inceptionv1_kinetics400.log
|
dmlc/web-data
|
gluoncv/logs/action_recognition/kinetics400/i3d_inceptionv1_kinetics400.sh
|
Shell
|
apache-2.0
| 1,040 |
#!/bin/bash
# Generic SGE job wrapper: load the user's environment, then exec the given
# command so it replaces this shell (signals and exit status pass through).
source ~/.bashrc

echo "Host name: $(hostname)"

# Group-writable output files (rw-rw-r--).
umask 002

if [ $# -lt 1 ]; then
  echo "Usage: wrapper.sh <command>"
  echo "Where <command> is the command to be executed by the SGE"
else
  # "$*" joins the arguments into one word for the log message; the
  # original unquoted $@ word-split inside the string.
  echo "Executing command: $*"
  exec "$@"
fi
|
romanzenka/swift
|
swift/scripts/src/main/resources/bin/util/sgeWrapper.sh
|
Shell
|
apache-2.0
| 241 |
#!/bin/bash
# Add or update an env[...] entry in a php-fpm pool config so the service's
# environment variables are visible to PHP.
#
# $1 - variable name
# $2 - variable value (entry is skipped with a notice when empty)
# $3 - (optional) pool config file to update;
#      defaults to /etc/php5/fpm/pool.d/www.conf for backward compatibility
function setEnvironmentVariable() {
    local name="$1"
    local value="$2"
    local conf="${3:-/etc/php5/fpm/pool.d/www.conf}"

    if [ -z "$value" ]; then
        echo "Environment variable '$name' not set."
        return
    fi

    # Fix: escape sed metacharacters in the value so paths such as
    # /usr/bin or strings containing '&' survive the substitution
    # (the original sed broke on any value containing '/').
    local escaped
    escaped=$(printf '%s' "$value" | sed -e 's/[\/&]/\\&/g')

    # Fix: match the actual env[NAME] entry instead of grepping for the
    # bare name, which could match inside an unrelated value.
    if grep -q "env\[$name\]" "$conf"; then
        # Reset existing variable
        sed -i "s/^env\[$name.*/env[$name] = $escaped/g" "$conf"
    else
        # Add new variable
        echo "env[$name] = $value" >> "$conf"
    fi
}
# Grep for variables that look like docker set them (_PORT_)
# NOTE(review): presumably these are docker link variables such as
# FOO_PORT_80_TCP_ADDR -- confirm against the container setup.
for _curVar in `env | grep _PORT_ | awk -F = '{print $1}'`;do
# awk has split them by the equals sign
# Pass the name and value to our function
# ${!_curVar} is bash indirect expansion: the value of the named variable.
setEnvironmentVariable ${_curVar} ${!_curVar}
done
# Apache needs its envvars loaded before apache2ctl will run.
source /etc/apache2/envvars
# Run Apache in the foreground so the container stays alive; append output
# to the log file.
service php5-fpm start && apache2ctl -e debug -D FOREGROUND >> /var/log/apache.log 2>&1
|
crollalowis/docker-apache-php5
|
run.sh
|
Shell
|
apache-2.0
| 927 |
#!/usr/bin/env bash
# prepare_NA12878.sh: download NA12878 reads and prepare chr22-related reads
# We need to include all the reads on chr22 or an associated alt, but also any
# reads that are pair partners with those reads. We will keep them all in paired
# FASTQs.
set -ex
# Download the CRAM
wget --progress=dot:giga ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/illumina_platinum_pedigree/data/CEU/NA12878/alignment/NA12878.alt_bwamem_GRCh38DH.20150706.CEU.illumina_platinum_ped.cram -O NA12878.alt_bwamem_GRCh38DH.20150706.CEU.illumina_platinum_ped.cram
# Sort reads by name, so pair partners are together
samtools sort -n NA12878.alt_bwamem_GRCh38DH.20150706.CEU.illumina_platinum_ped.cram -o NA12878.byname.bam
# Go through in name order and collect pairs where one end touches chr22 or a related alt
# ($3 is the read's reference name, $7 the mate's; the regex matches 'chr22'
# itself and any 'chr22_*' alt contig).
samtools view NA12878.byname.bam | awk '{if ($3 ~ /chr22(_.*)?$/ || $7 ~ /chr22(_.*)?$/) print}' | ./scripts/smartSam2Fastq.py --fq1 NA12878.chr22related.R1.fastq --fq2 NA12878.chr22related.R2.fastq --drop_secondary --expect_paired
|
adamnovak/hgvm-builder
|
scripts/prepare_NA12878.sh
|
Shell
|
apache-2.0
| 1,057 |
#!/bin/bash
# svm (scriptcs version manager) installer for *nix shells.
USER_SVM_PATH="$HOME/.svm"   # install root
TEMP_PATH="/tmp"             # scratch space for the downloaded package
#
# helper functions
#
# Print a bold, underlined section heading, padded with blank lines.
# Escape sequences in the message are interpreted (as echo -e would).
_svm_title_message() {
  printf '\n\033[1;4m %b \033[0m\n\n' "$1"
}
# Print an informational line with a single space of padding on each side.
# Escape sequences in the message are interpreted (as echo -e would).
_svm_info_message() {
  printf ' %b \n' "$1"
}
# Print an error line on a red background.
# Escape sequences in the message are interpreted (as echo -e would).
_svm_error_message() {
  printf '\033[41m %b \033[0m\n' "$1"
}
# Succeed iff the named command resolves (binary, builtin or function).
_svm_isinstalled() {
  type "$1" >/dev/null 2>&1
}
# Prepare $1 as the install root. On upgrade, delete every top-level entry
# except 'versions'/'version' (installed scriptcs versions are kept);
# otherwise create the directory fresh.
_svminstaller_create_install_location() {
local install_path="$1"
local paths=""
if [ -d "$install_path" ]; then
_svm_info_message "An existing svm installation was found at '$install_path'. This will be upgraded."
paths=`find "$install_path" -mindepth 1 -maxdepth 1 \( ! -name "versions" ! -name "version" \)`
for path in ${paths[@]}; do
# Safety check: only delete entries inside the user's svm folder.
if [[ $path == $USER_SVM_PATH* ]]; then rm -fr $path; fi
done
else
_svm_info_message "Creating svm install location at '$install_path'."
mkdir "$install_path"
fi
}
# Download the svm release zip from $1 into $2; abort unless the final
# HTTP status (after redirects) is 200.
_svminstaller_download_package() {
local package_url="$1"
local download_path="$2"
_svm_info_message "Downloading svm install package from '$package_url'."
# -D -: dump response headers to stdout; -L: follow redirects; the
# pipeline extracts the status code of the last response.
local httpResult=$(curl -D - -L --create-dirs -o "$download_path" "$package_url" 2>/dev/null | grep "^HTTP/1.1" | tail -n 1 | sed "s/HTTP.1.1 \([0-9]*\).*/\1/")
if [ $httpResult != "200" ];then
_svm_error_message "The svm install package could not be downloaded from '$package_url'."
echo "" && exit
fi
}
# Unpack the downloaded zip ($1) into the install root ($2), keeping only
# the *nix 'bin' and 'shims' payload, and clean up the download folder.
_svminstaller_install_package() {
local download_path="$1"
local install_path="$2"
_svm_info_message "Installing svm to '$install_path'."
# -j: junk archive paths; extract just the files into bin/ and shims/.
unzip -j "$download_path" "svm-0.4.1/src/bin/*" -d "$install_path/bin" > /dev/null 2>&1
unzip -j "$download_path" "svm-0.4.1/src/shims/*" -d "$install_path/shims" > /dev/null 2>&1
mkdir "$install_path/versions" > /dev/null 2>&1
# remove Windows specific resources from installed package
find "$install_path" \( -name "*.cmd" -or -name "*.ps1" \) -exec rm -f {} \;
# remove download folder - with safety check that path starts with temp folder
download_folder="${download_path%/*}"
if [[ $download_folder == $TEMP_PATH* ]]; then rm -fr $download_folder; fi
}
# Wire svm into the user's shell: generate ~/.svm_profile (which prepends
# bin/ and shims/ to PATH) and source it from .bash_profile and/or .zshrc.
_svminstaller_configure_environment() {
local install_path="$1"
local path=""
local path_bin=""
local path_shims=""
_svm_info_message "Configuring path environment variables for svm."
path="$PATH"
path_bin="$install_path/bin"
path_shims="$install_path/shims"
# remove any existing instances of svm folders from PATH
path=":$path:"
path=${path/:$path_bin:/:}
path=${path/:$path_shims:/:}
path=${path%:}
path=${path#:}
# NOTE(review): the cleaned 'path' value is never used below -- confirm
# whether exporting it was intended.
local startup_file=""
# write to .bash_profile if bash is installed
startup_file="$HOME/.bash_profile"
if _svm_isinstalled "bash"; then
# The marker comment doubles as an idempotency check.
if [[ $(grep '# scriptcs version manager' "$startup_file" -s) ]]; then
_svm_info_message "Existing entry in .bash_profile file found at '$startup_file'. File will not be modified."
else
_svm_info_message "Writing entry into .bash_profile file found at '$startup_file'."
echo '' >> "$startup_file"
echo '# scriptcs version manager' >> "$startup_file"
echo '. $HOME/.svm_profile' >> "$startup_file"
fi
fi
# write to .zshrc if zsh is installed
startup_file="$HOME/.zshrc"
if _svm_isinstalled "zsh"; then
if [[ $(grep '# scriptcs version manager' "$startup_file" -s) ]]; then
_svm_info_message "Existing entry in .zshrc file found at '$startup_file'. File will not be modified."
else
_svm_info_message "Writing entry into .zshrc file found at '$startup_file'."
echo '' >> "$startup_file"
echo '# scriptcs version manager' >> "$startup_file"
echo '. $HOME/.svm_profile' >> "$startup_file"
fi
fi
# write to .svm_profile
startup_file="$HOME/.svm_profile"
if [ -f "$startup_file" ]; then
_svm_info_message "Overwriting .svm_profile file at '$startup_file'."
else
_svm_info_message "Creating .svm_profile file at '$startup_file'."
fi
echo '# scriptcs version manager' > "$startup_file"
echo 'export PATH="$HOME/.svm/bin:$HOME/.svm/shims:$PATH"' >> "$startup_file"
# set execute file attribute on shell scripts
chmod 644 "$HOME/.svm_profile"
chmod 755 "$USER_SVM_PATH/bin/svm"
chmod 755 "$USER_SVM_PATH/shims/scriptcs"
}
#
# installer
#
_svm_title_message "scriptcs version manager - installer"
svm_install_path="$USER_SVM_PATH"
svm_package_url="https://github.com/scriptcs-contrib/svm/archive/v0.4.1.zip"
# uuidgen yields a unique temp folder per run, so concurrent runs can't clash.
svm_download_path="$TEMP_PATH/`uuidgen`/svm-install.zip"
# 1) prepare/upgrade the install dir, 2) fetch the release zip,
# 3) unpack it, 4) hook svm into the user's shell start-up files.
_svminstaller_create_install_location "$svm_install_path"
_svminstaller_download_package "$svm_package_url" "$svm_download_path"
_svminstaller_install_package "$svm_download_path" "$svm_install_path"
_svminstaller_configure_environment "$svm_install_path"
_svm_info_message "Successfully installed!"
_svm_info_message "\nRun 'svm help' to get started."
echo ""
|
modulexcite/svm
|
install/installer.sh
|
Shell
|
apache-2.0
| 4,772 |
#!/usr/bin/env bash
# Copyright 2019 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# NOTE(review): codegen-library.sh is assumed to define MODULE_NAME,
# REPO_ROOT_DIR and the group() helper used below -- confirm.
source $(dirname $0)/../vendor/knative.dev/hack/codegen-library.sh
# If we run with -mod=vendor here, then generate-groups.sh looks for vendor files in the wrong place.
export GOFLAGS=-mod=
echo "=== Update Codegen for $MODULE_NAME"
group "Update deps post-codegen"
# Make sure our dependencies are up-to-date
${REPO_ROOT_DIR}/hack/update-deps.sh
|
knative-sandbox/net-http01
|
hack/update-codegen.sh
|
Shell
|
apache-2.0
| 1,003 |
#!/bin/bash
# Build the accumulo-mesos artifacts and stage them in docker/lib for the
# Docker image build.
cd ..
mvn package
cd docker
# Fix: the original guarded 'mkdir lib' with '[ ! -f "lib" ]', which tests
# for a regular *file* -- true even when the lib directory already exists,
# so mkdir errored on every re-run. mkdir -p is idempotent.
mkdir -p lib
echo "Copying libraries..."
cp ../accumulo-mesos-framework/target/accumulo-mesos-framework-0.2.0-SNAPSHOT-jar-with-dependencies.jar lib/
cp ../accumulo-mesos-dist/target/accumulo-mesos-dist-0.2.0-SNAPSHOT.tar.gz lib/
|
aredee/accumulo-mesos
|
docker/build-framwork.sh
|
Shell
|
apache-2.0
| 298 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.