code (stringlengths 2 to 1.05M) | repo_name (stringlengths 5 to 110) | path (stringlengths 3 to 922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 2 to 1.05M)
---|---|---|---|---|---
#!/bin/bash
# Author: Andrey Dibrov (andry at inbox dot ru)
# Bash file library, supports common file functions.
# Script can be ONLY included by "source" command.
[[ -z "$BASH" || (-n "$BASH_LINENO" && BASH_LINENO[0] -le 0) || (-n "$SOURCE_CONTOOLS_FILELIB_SH" && SOURCE_CONTOOLS_FILELIB_SH -ne 0) ]] && return
SOURCE_CONTOOLS_FILELIB_SH=1 # including guard
if [[ -z "$SOURCE_TACKLELIB_BASH_TACKLELIB_SH" || SOURCE_TACKLELIB_BASH_TACKLELIB_SH -eq 0 ]]; then
# builtin search
for BASH_SOURCE_DIR in "/usr/local/bin" "/usr/bin" "/bin"; do
[[ -f "$BASH_SOURCE_DIR/bash_tacklelib" ]] && {
source "$BASH_SOURCE_DIR/bash_tacklelib" || exit $?
break
}
done
fi
tkl_include_or_abort '__init__.sh'
tkl_include_or_abort "$TACKLELIB_BASH_ROOT/tacklelib/baselib.sh"
tkl_include_or_abort "$CONTOOLS_BASH_ROOT/traplib.sh"
tkl_include_or_abort "$CONTOOLS_BASH_ROOT/stringlib.sh"
tkl_include_or_abort "$CONTOOLS_BASH_ROOT/regexplib.sh"
function BufferedRead()
{
local bufSize="$1"
local bufVarName="$2"
local bufStateArrayName="$3"
# update buffer state
eval "$bufStateArrayName[0]=0"
local IFS='' # enables read whole string line into a single variable
read -n "$bufSize" -r -d '' "$bufVarName"
eval "(( \${#$bufVarName} == bufSize ))" && return 0
return 1
}
# Return codes:
# 0 - no EOF, the buffer line holds characters terminated by the line split
# string.
# 1 - EOF, the buffer line holds characters terminated by the EOF or by a line
# split string which itself was early-terminated by the EOF.
# Details:
# The buffer state has format:
# [0] - buffer position index after last line read.
# [1] - beginning of a line in the buffer after last line read.
# [2] - length of a line in the buffer after last line read.
# [3] - number of the line split string characters read before the next line
# or before the EOF if the line split string was early-terminated by
# the EOF.
# For example, if the last line of a file is not terminated by the line split
# string characters, then the [3] buffer state would be 0 and the function
# would return 1. In another case, if the last line of a file is terminated by
# a beginning of the line split string followed by the EOF, then the [3] buffer
# state would be the length of that beginning of the line split string before
# the EOF and the function would still return 1. In all other cases, where the
# line split string is completely read after the line, the [3] buffer state
# would be the full length of the line split string and the function would
# return 0.
# WARNING:
# 1. Because the line split string can be longer than 1 character, it can be
# early-terminated by the EOF, so the line split string characters may end up
# as a part of the line. To intercept that case you need to check the [3]
# buffer state after each buffer line read. If it is not equal to 0 or to the
# length of the line split string, then in the next buffer line read the
# beginning of the line would contain the trailing characters of the line
# split string! (see the second usage example below)
# 2. The function does not track the order of the characters in the line split
# string if the line split string is longer than 1 character, so it is the
# user's responsibility to control that order. For example, if the line split
# string is the sequence "\n\r", then the sequence "\r\n" is NOT treated as
# the line split string: the "\r" character will be appended to the end of
# the line, and the "\n" character will be appended too, but only if it is not
# followed by a "\r" character or by the EOF.
#
# EXAMPLE OF USAGE: BufferedRead+BufferedReadLine+BufferedEcho+FlushEchoBuffer:
# BufferedReadError=0
# BufferedLineReadError=0
# stdoutBuf=""
# stdinBuf=""
# stdinLine=""
# bufStateArray=(0)
#
# DoReadBuf=1
# while (( DoReadBuf )); do
# BufferedRead 512 stdinBuf bufStateArray
# BufferedReadError=$?
#
# DoReadBufLines=1
# while (( DoReadBufLines )); do
# BufferedReadLine stdinBuf bufStateArray
# BufferedLineReadError=$?
#
# BufferedEcho stdoutBuf 512 "${stdinBuf:${bufStateArray[1]}:${bufStateArray[2]}}"
# if (( ${bufStateArray[3]} )); then
# BufferedEcho stdoutBuf 512 $'\n'
# fi
#
# (( ! BufferedLineReadError )) || DoReadBufLines=0
# done
#
# (( ! BufferedReadError )) || DoReadBuf=0
# done
# FlushEchoBuffer stdoutBuf
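#
# EXAMPLE OF USAGE (hedged sketch, names are placeholders): a custom 2-character
# line split string. This only illustrates the WARNING above about checking the
# [3] buffer state when the line split string is longer than 1 character.
#
# bufStateArray=(0)
# BufferedRead 512 stdinBuf bufStateArray
# DoReadBufLines=1
# while (( DoReadBufLines )); do
# BufferedReadLine stdinBuf bufStateArray $'\r\n'
# LineReadError=$?
# line="${stdinBuf:${bufStateArray[1]}:${bufStateArray[2]}}"
# if (( bufStateArray[3] != 0 && bufStateArray[3] != 2 )); then
# : # the split string was cut by the buffer end or the EOF: the next buffer
# : # line may begin with the tail characters of the split string
# fi
# (( ! LineReadError )) || DoReadBufLines=0
# done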
function BufferedReadLine()
{
local bufVarName="$1"
local bufStateArrayName="$2"
local lineSplitStr="${3-$'\n'}"
local lineSplitStrLen="${#lineSplitStr}"
local IFS=$' \t'
eval declare "bufSize=\"\${#$bufVarName}\""
# read buffer state from array
eval declare "bufPos=\"\${$bufStateArrayName[0]:-0}\""
local i
local subStr
for (( i=bufPos; i<bufPos+bufSize; i++ )); do
eval "subStr=\"\${$bufVarName:\$i:\$lineSplitStrLen}\""
case "$subStr" in
"$lineSplitStr")
# complete line read w/o EOF hit
eval "$bufStateArrayName=(\"\$((i+lineSplitStrLen))\" \
\"\$bufPos\" \"\$((i-bufPos))\" \"\$lineSplitStrLen\")"
return 0
;;
*)
if (( ${#subStr} < lineSplitStrLen )); then
case "$lineSplitStr" in
"$subStr"*)
# the line split string early-terminated by the EOF
eval "$bufStateArrayName=(\"\$((i+\${#subStr}))\" \
\"\$bufPos\" \"\$((i-bufPos))\" \"\${#subStr}\")"
return 1
;;
esac
fi
;;
esac
done
# EOF
eval "$bufStateArrayName=(\"\$i\" \"\$bufPos\" \"\$bufSize\" 0)"
return 1
}
function BufferedEcho()
{
local bufVarName="$1"
local bufMaxSize="$2"
local outputText="$3"
local outputTextLen="${#outputText}"
eval declare "bufSize=\"\${#$bufVarName}\""
if (( bufMaxSize >= bufSize+outputTextLen )); then
eval "$bufVarName=\"\$$bufVarName\$outputText\""
return 0
fi
eval echo -n "\"\$$bufVarName\""
local LastError=$?
if (( bufMaxSize >= outputTextLen )); then
eval "$bufVarName=\"\$outputText\""
else
echo -n "$outputText"
(( ! bufSize )) || eval "$bufVarName=''"
fi
return $LastError
}
function FlushEchoBuffer()
{
local bufVarName="$1"
eval declare "bufSize=\"\${#$bufVarName}\""
if (( bufSize )) ; then
eval echo -n "\"\$$bufVarName\""
eval "$bufVarName=''"
return 0
fi
return 1
}
function GetFilePath()
{
# drop return value
RETURN_VALUE=""
local FilePathLink="$1"
[[ -n "$FilePathLink" ]] || return 1
case "$OSTYPE" in
"msys") local WhichUtility='/bin/which' ;;
*) local WhichUtility='/bin/which.exe' ;;
esac
local FilePathLinkFileName="${FilePathLink##*[/\\]}"
if [[ "$FilePathLinkFileName" == "$FilePathLink" ]]; then
# file path link is file name. Search file name in search paths
RETURN_VALUE="`"$WhichUtility" "$FilePathLinkFileName" 2>/dev/null`"
else
# convert before call to readlink!
ConvertNativePathToBackend "$FilePathLink"
fi
[[ ! -f "$RETURN_VALUE" ]] || return 0
# file name is not found in search paths, construct absolute file path from file name
RETURN_VALUE="`/bin/readlink.exe -m "$FilePathLink"`"
if [[ ! -f "$RETURN_VALUE" ]]; then
# still is not found, return input path link
RETURN_VALUE="$FilePathLink"
return 1
fi
return 0
}
function GetAbsolutePathFromDirPath()
{
# drop return value
RETURN_VALUE=""
local DirPath="$1"
local RelativePath="$2"
# drop line returns
DirPath="${DirPath//[$'\r\n']}"
RelativePath="${RelativePath//[$'\r\n']}"
# WORKAROUND:
# Because some versions of readlink cannot handle Windows native absolute
# paths correctly, always try to convert the directory path to a backend
# path before calling readlink in case the path contains native-specific
# characters.
if [[ "${DirPath:1:1}" == ":" ]]; then
ConvertNativePathToBackend "$DirPath"
DirPath="$RETURN_VALUE"
fi
if [[ -n "$DirPath" ]]; then
if [[ -x "/bin/readlink" ]]; then
if [[ "${RelativePath:0:1}" != '/' ]]; then
RETURN_VALUE=$(/bin/readlink -m "$DirPath${RelativePath:+/}$RelativePath")
else
RETURN_VALUE=$(/bin/readlink -m "$RelativePath")
fi
else
return 1
fi
else
return 2
fi
}
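# EXAMPLE (hedged sketch, hypothetical paths, assumes an MSYS/Cygwin environment
# with /bin/readlink available):
#
# GetAbsolutePathFromDirPath 'C:\Projects\app' '../lib' && echo "$RETURN_VALUE"
# # the native drive path is converted to a backend path first, so readlink -m
# # resolves it to /C/Projects/lib (or /cygdrive/C/Projects/lib on Cygwin)
#
# GetAbsolutePathFromDirPath "$PWD" 'build/../dist' && echo "$RETURN_VALUE"
# # -> "$PWD/dist" with the ".." component collapsed by readlink -m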
function ConvertBackendPathToNative()
{
# cygwin/msys2 uses cygpath command to convert paths
# msys/mingw uses old style conversion through the "cmd.exe ^/C" call
# drop return value
RETURN_VALUE="$1"
local LastError=0
local PathToConvert="$1"
local Flags="$2"
local ConvertedPath=""
if [[ "${Flags/i/}" != "$Flags" ]]; then
# w/ user mount points bypassing
ExctractPathIgnoringUserMountPoints -w "$PathToConvert"
LastError=$?
fi
if [[ "${Flags/s/}" != "$Flags" ]]; then
# convert backslashes to slashes
RETURN_VALUE="${RETURN_VALUE//\\//}"
fi
if [[ "${Flags/i/}" != "$Flags" ]]; then
return $LastError
fi
[[ -n "$PathToConvert" ]] || return 1
GetAbsolutePathFromDirPath "$PathToConvert" || return 2
case "$OSTYPE" in
msys* | mingw*)
while true; do
# in msys2 and higher we must use /bin/cygpath.exe to convert the path
if [[ "$OSTYPE" == "msys" && -f "/bin/cygpath.exe" ]]; then
ConvertedPath="`/bin/cygpath.exe -w "$RETURN_VALUE"`"
break
fi
local ComSpecInternal="${COMSPEC//\\//}" # workaround for a "command not found" in the msys shell
# msys replaces mount point path properly if it ends by '/' character
RETURN_VALUE="${RETURN_VALUE%/}/"
EscapeString "$RETURN_VALUE" '' 2
# msys automatically converts argument to the native path if it begins from '/' character
ConvertedPath="`"$ComSpecInternal" '^/C' \(echo $RETURN_VALUE\)`"
break
done
;;
cygwin*)
ConvertedPath="`/bin/cygpath.exe -w "$RETURN_VALUE"`"
;;
*)
if [[ "${Flags/s/}" != "$Flags" ]]; then
# convert backslashes to slashes
RETURN_VALUE="${RETURN_VALUE//\\//}"
fi
return 0
;;
esac
# remove last slash
ConvertedPath="${ConvertedPath%[/\\]}"
if [[ "${Flags/s/}" != "$Flags" ]]; then
# convert backslashes to slashes
RETURN_VALUE="${ConvertedPath//\\//}"
else
# convert all slashes to backward slashes
RETURN_VALUE="${ConvertedPath//\//\\}"
fi
return 0
}
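# EXAMPLE (hedged sketch, hypothetical paths, assumes an MSYS or Cygwin backend):
#
# ConvertBackendPathToNative '/c/Projects/app' && echo "$RETURN_VALUE"
# # -> C:\Projects\app (native path, backward slashes by default)
#
# ConvertBackendPathToNative '/c/Projects/app' s && echo "$RETURN_VALUE"
# # -> C:/Projects/app (the 's' flag keeps forward slashes)
#
# ConvertBackendPathToNative '/usr/local/bin' i && echo "$RETURN_VALUE"
# # -> the path resolved with user mount points bypassed (the 'i' flag)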
function ConvertNativePathToBackend()
{
# drop return value
RETURN_VALUE="$1"
# Convert all back slashes to slashes.
local PathToConvert="${1//\\//}"
[[ -n "$PathToConvert" ]] || return 1
# workaround for the bash 3.1.0 bug for the expression "${arg:X:Y}",
# where "Y == 0" or "Y + X >= ${#arg}"
local PathToConvertLen=${#PathToConvert}
local PathPrefixes=('' '')
local PathSuffix=""
if (( PathToConvertLen > 0 )); then
PathPrefixes[0]="${PathToConvert:0:1}"
fi
if (( PathToConvertLen > 1 )); then
PathPrefixes[1]="${PathToConvert:1:1}"
fi
if (( PathToConvertLen >= 3 )); then
PathSuffix="${PathToConvert:2}"
PathSuffix="${PathSuffix%/}"
fi
# Convert path drive prefix too.
if [[ "${PathPrefixes[0]}" != '/' && "${PathPrefixes[0]}" != '.' && "${PathPrefixes[1]}" == ':' ]]; then
case "$OSTYPE" in
cygwin*) PathToConvert="/cygdrive/${PathPrefixes[0]}$PathSuffix" ;;
*)
PathToConvert="/${PathPrefixes[0]}$PathSuffix"
# add slash to the end of path in case of drive only path
(( ! ${#PathSuffix} )) && PathToConvert="$PathToConvert/"
;;
esac
fi
RETURN_VALUE="$PathToConvert"
return 0
}
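# EXAMPLE (hedged sketch, hypothetical paths; the drive letter case is preserved
# as-is because the function uses plain string manipulation):
#
# ConvertNativePathToBackend 'C:\Windows\System32' && echo "$RETURN_VALUE"
# # -> /cygdrive/C/Windows/System32 on Cygwin, /C/Windows/System32 otherwise
#
# ConvertNativePathToBackend 'C:\' && echo "$RETURN_VALUE"
# # -> /C/ on non-Cygwin backends (trailing slash kept for a drive-only path),
# #    /cygdrive/C on Cygwin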
function ConvertNativePathListToBackend()
{
# drop return value
RETURN_VALUE="$1"
local PathListToConvert="$1"
[[ -n "$PathListToConvert" ]] || return 1
# Convert all back slashes to slashes.
local ConvertedPathList="${PathListToConvert//\\//}"
# workaround for the bash 3.1.0 bug for the expression "${arg:X:Y}",
# where "Y == 0" or "Y + X >= ${#arg}"
local PathToConvertLen
local PathPrefixes=('' '')
local PathSuffix=""
local arg
local IFS=';'
for arg in $ConvertedPathList; do
PathToConvertLen=${#arg}
if (( PathToConvertLen > 0 )); then
PathPrefixes[0]="${arg:0:1}"
fi
if (( PathToConvertLen > 1 )); then
PathPrefixes[1]="${arg:1:1}"
fi
if (( PathToConvertLen >= 3 )); then
PathSuffix="${arg:2}"
fi
# Convert path drive prefix too.
if [[ "${PathPrefixes[0]}" != '/' && "${PathPrefixes[0]}" != '.' && "${PathPrefixes[1]}" == ':' ]]; then
case "$OSTYPE" in
cygwin*) RETURN_VALUE="$RETURN_VALUE${RETURN_VALUE:+:}/cygdrive/${PathPrefixes[0]}${PathSuffix}" ;;
*) RETURN_VALUE="$RETURN_VALUE${RETURN_VALUE:+:}/${PathPrefixes[0]}${PathSuffix}" ;;
esac
else
RETURN_VALUE="$RETURN_VALUE${RETURN_VALUE:+${arg:+:}}$arg"
fi
done
return 0
}
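# EXAMPLE (hedged sketch, hypothetical paths): convert a ';'-separated native
# path list into a ':'-separated backend list, e.g. before appending it to PATH.
#
# ConvertNativePathListToBackend 'C:\Tools;D:\SDK\bin;./relative' && echo "$RETURN_VALUE"
# # -> /cygdrive/C/Tools:/cygdrive/D/SDK/bin:./relative on Cygwin
# # -> /C/Tools:/D/SDK/bin:./relative on other backends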
function RemoveRelativePathsFromPathListVariable()
{
local VarName="$1"
eval local "Var=\"\$$VarName\""
local NewPaths
local arg
# remove relative paths from search paths
local IFS=':'
for arg in $Var; do
if [[ "${arg:0:1}" == '/' ]]; then
NewPaths="$NewPaths${NewPaths:+${arg:+:}}$arg"
fi
done
eval "$VarName=\"\$NewPaths\""
}
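# EXAMPLE (hedged sketch, hypothetical variable): keep only the absolute entries
# of a ':'-separated search path list.
#
# MY_SEARCH_PATHS='/usr/bin:relative/dir:/opt/tools:.'
# RemoveRelativePathsFromPathListVariable MY_SEARCH_PATHS
# echo "$MY_SEARCH_PATHS" # -> /usr/bin:/opt/tools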
# Extracts real backend path with mount points application.
# Useful before call to "mkdir -p" which could create nesting directories
# by wrong address. For example, if call "mkdir -p /usr/local/install/blabla"
# and "/usr/local/install/blabla" is mounted somewhere, then it creates
# subdirectories not upto mount point but as is from the root - "/".
function ExtractBackendPath()
{
# Trick it with conversion to native path and back
ConvertBackendPathToNative "$1" && ConvertNativePathToBackend "$RETURN_VALUE"
}
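# EXAMPLE (hedged sketch, hypothetical mount): resolve the real backend path
# before creating nested directories, as described above.
#
# ExtractBackendPath '/usr/local/install/blabla' && echo "$RETURN_VALUE"
# # a following `mkdir -p "$RETURN_VALUE"` then creates the subdirectories under
# # the real mount point target instead of literally under "/"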
function CanonicalNativePath()
{
# Trick it with conversion to backend path and back w/ bypassing user mount
# points
ConvertNativePathToBackend "$1" && ConvertBackendPathToNative "$RETURN_VALUE" -i
}
# The same as ExtractBackendPath, but bypasses the user mount points table
# except builtin mounts
function ExctractPathIgnoringUserMountPoints()
{
# Splits the path into 2 paths by extracting builtin paths from the beginning of
# the path in this order:
# "/usr/bin" => "/usr/lib" => "/usr" => "/lib" => "/<drive>/" => "/"
# That is because the Cygwin backend has the redirection of
# "/usr/bin" and "/usr/lib" into the "/bin" and "/lib" paths respectively, but
# doesn't have the redirection of the "/usr" itself, while the Msys backend has
# the redirection of the "/usr" path to "/" but does not have one for the
# "/usr/bin" path.
# Examples:
# 1. path=/usr/bin => prefix=/usr/bin/ suffix=
# 2. path=/usr/lib => prefix=/usr/lib/ suffix=
# 3. path=/usr => prefix=/usr/ suffix=
# 4. path=/lib => prefix=/lib/ suffix=
# 5. path=/usr/local/bin => prefix=/usr/ suffix=local/bin
# 6. path=/tmp => prefix=/ suffix=tmp
# Specific to Msys behaviour:
# 7. path=/c/ => prefix=/c/ suffix=
# 8. path=/c => prefix=/ suffix=c
# Specific to Cygwin behaviour:
# 9. path=/cygdrive/c => prefix=/cygdrive/c suffix=
local Flags="$1"
if [[ "${Flags:0:1}" == '-' ]]; then
shift
else
Flags=''
fi
local PathToConvert="$1"
# drop return value
RETURN_VALUE=""
(( ${#PathToConvert} )) || return 1
local DoConvertToBackendTypePath=1
if [[ "${Flags//w/}" != "$Flags" ]]; then
DoConvertToBackendTypePath=0 # convert to native path
elif [[ "${Flags//b/}" != "$Flags" ]]; then # explicit flag
DoConvertToBackendTypePath=1 # convert to backend path
fi
# enable nocase match
local oldShopt=""
function LocalReturn()
{
if [[ -n "$oldShopt" ]]; then
# Restore state
eval $oldShopt
fi
unset -f "${FUNCNAME[0]}" # drop function after execution
}
# override RETURN with other traps restore
PushTrapFunctionMove "$DefaultTrapsStackName" LocalReturn RETURN || return 253
oldShopt="$(shopt -p nocasematch)" # Read state before change
if [[ "$oldShopt" != "shopt -s nocasematch" ]]; then
shopt -s nocasematch
else
oldShopt=''
fi
# The case patterns w/o * ending character.
# If / character at the end then it is required.
local PathPrefixes=(/usr/bin /usr/lib /usr /lib '/[a-zA-Z]/' '/cygdrive/[a-zA-Z]' /)
local PathPrefix
local PathSuffix
local IsFound=0
for PathPrefix in "${PathPrefixes[@]}"; do
PathSuffix="${PathToConvert#$PathPrefix}"
if [[ "$PathSuffix" != "$PathToConvert" ]] &&
[[ -z "$PathSuffix" || "${PathSuffix:0:1}" == '/' || "${PathPrefix%/}" != "$PathPrefix" ]]; then
IsFound=1
PathPrefix="${PathToConvert%$PathSuffix}"
break
fi
done
if (( ! IsFound )); then
PathPrefix="$PWD" # current path as base if builtin is not found
PathSuffix="$PathToConvert"
fi
PathPrefix="${PathPrefix%/}/" # forward slash at the end
PathSuffix="${PathSuffix#/}" # no forward slash at the begin
PathSuffix="${PathSuffix%/}" # no forward slash at the end
local ConvertedPath
# bypassing mounting points
case "$OSTYPE" in
msys* | mingw*)
while true; do
# in msys2 and higher we must use /bin/cygpath.exe to convert the path
if [[ "$OSTYPE" == "msys" && -f "/bin/cygpath.exe" ]]; then
ConvertedPath="`/bin/cygpath.exe -w "$RETURN_VALUE"`"
break
fi
local ComSpecInternal="${COMSPEC//\\//}" # workaround for a "command not found" in the msys shell
# msys replaces mount point path properly if it ends by '/' character
RETURN_VALUE="${PathPrefix%/}/"
EscapeString "$RETURN_VALUE" '' 2
# msys automatically converts argument to the native path if it begins from '/' character
ConvertedPath="$("$ComSpecInternal" '^/C' \(echo $RETURN_VALUE\))"
break
done
;;
cygwin*)
ConvertedPath="`/bin/cygpath.exe -w "$PathPrefix"`"
;;
*)
RETURN_VALUE="${PathPrefix%/}${PathSuffix:+/}$PathSuffix"
return 0
;;
esac
# remove last slash
ConvertedPath="${ConvertedPath%[/\\]}"
# convert to declared path type with replacement of all backward slashes
if (( DoConvertToBackendTypePath )); then
ConvertNativePathToBackend "${ConvertedPath//\//\\}" || return 3
RETURN_VALUE="$RETURN_VALUE${PathSuffix:+/}$PathSuffix"
else
RETURN_VALUE="${ConvertedPath//\\//}${PathSuffix:+/}$PathSuffix"
fi
return 0
}
function GetRelativePathFromAbsolutePaths()
{
# drop return value
RETURN_VALUE=""
local LastError
local AbsolutePath="$1"
local AbsoluteFromPath="$2"
local AbsolutePathDiff
local AbsoluteFromDiff
local arg
if (( ${#AbsolutePath} )) && [[ "${AbsolutePath:0:1}" != '/' ]]; then
GetAbsolutePathFromDirPath "$AbsolutePath"
AbsolutePath="$RETURN_VALUE"
fi
if (( ${#AbsoluteFromPath} )) && [[ "${AbsoluteFromPath:0:1}" != '/' ]]; then
GetAbsolutePathFromDirPath "$AbsoluteFromPath"
AbsoluteFromPath="$RETURN_VALUE"
fi
AbsolutePathDiff="${AbsolutePath#/}"
AbsoluteFromDiff="${AbsoluteFromPath#/}"
local ch
local hasCommonPath=0
local IFS=$'\r\n' # enables string split only by line return characters and non printable characters may become part of name
for arg in ${AbsolutePathDiff//\//$'\n'}; do
[[ -n "$arg" && -n "$AbsoluteFromDiff" ]] || break
ch="${AbsoluteFromDiff:${#arg}:1}"
if [[ "${AbsoluteFromDiff:0:${#arg}}" == "$arg" && ( -z "$ch" || "$ch" == '/' ) ]]; then
AbsolutePathDiff="${AbsolutePathDiff:${#arg}+1}"
AbsoluteFromDiff="${AbsoluteFromDiff:${#arg}+1}"
hasCommonPath=1
else
break
fi
done
if (( hasCommonPath )); then
RETURN_VALUE=""
for arg in ${AbsoluteFromDiff//\//$'\n'}; do
[[ -n "$arg" ]] || break
RETURN_VALUE="$RETURN_VALUE${RETURN_VALUE:+/}.."
done
RETURN_VALUE="$RETURN_VALUE${RETURN_VALUE:+${AbsolutePathDiff:+/}}$AbsolutePathDiff"
else
RETURN_VALUE="/$AbsolutePathDiff"
fi
RETURN_VALUE="${RETURN_VALUE%/}"
return 0
}
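# EXAMPLE (hedged sketch, hypothetical paths): the first argument is the target
# path, the second is the base path the result is relative to.
#
# GetRelativePathFromAbsolutePaths '/home/user/project/src/lib' '/home/user/project/build'
# echo "$RETURN_VALUE" # -> ../src/lib
#
# GetRelativePathFromAbsolutePaths '/home/user/project' '/home/user/project'
# echo "$RETURN_VALUE" # -> empty string for identical paths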
function NormalizePath()
{
# drop return value
RETURN_VALUE=""
local InputPath="$1"
# Convert native path to backend path before call to the readlink.
ConvertNativePathToBackend "$InputPath" || return 1
# Normalization through "readlink.exe" utility, path may not exist.
RETURN_VALUE="`/bin/readlink.exe -m "$RETURN_VALUE"`"
return 0
}
function GetExitingFilePaths()
{
# drop return value
RETURN_VALUE=()
local FromPath="$1"
shift
local FilePath
for FilePath in "$@"; do
if [[ "${FilePath:0:1}" != "/" ]]; then
[[ -f "$FromPath${FromPath:+${FilePath:+/}}$FilePath" ]] && RETURN_VALUE[${#RETURN_VALUE[@]}]="$FilePath"
else
[[ -f "$FilePath" ]] && RETURN_VALUE[${#RETURN_VALUE[@]}]="$FilePath"
fi
done
}
function GetExitingPaths()
{
# drop return value
RETURN_VALUE=()
local FromPath="$1"
shift
local FilePath
for FilePath in "$@"; do
if [[ "${FilePath:0:1}" != "/" ]]; then
[[ -f "$FromPath${FromPath:+${FilePath:+/}}$FilePath" || \
-d "$FromPath${FromPath:+${FilePath:+/}}$FilePath" ]] && RETURN_VALUE[${#RETURN_VALUE[@]}]="$FilePath"
else
[[ -f "$FilePath" || -d "$FilePath" ]] && RETURN_VALUE[${#RETURN_VALUE[@]}]="$FilePath"
fi
done
}
function ReadFileToVar()
{
local FilePath="$1"
local VarName="$2"
[[ -n "$VarName" ]] || return 1
# drop variable value
eval "$VarName=''"
GetFilePath "$FilePath"
FilePath="$RETURN_VALUE"
[[ -f "$FilePath" ]] || return 2
eval "$VarName=\"`IFS=''; cat \"\$FilePath\"`\""
return 0
}
function MakeCommandArgumentsFromFile()
{
# drop return value
RETURN_VALUE=""
local FilePath="$1"
local DoEval="${2:-0}"
if [[ "$FilePath" != '-' ]]; then
FilePath="`/bin/readlink.exe -m "$FilePath"`"
[[ -f "$FilePath" ]] || return 1
fi
local ConfigString=""
function InternalRead()
{
local i
local ConfigLine=""
local IgnoreLine=0
local IsEscapedSequence=0
local ConfigLineLen
local IFS='' # enables read whole string line into a single variable
while read -r ConfigLine; do
IsEscapedSequence=0
IgnoreLine=0
ConfigLineLen="${#ConfigLine}"
for (( i=0; i<ConfigLineLen; i++ )); do
case "${ConfigLine:i:1}" in
$'\n') ;;
$'\r') ;;
\\)
if (( ! IsEscapedSequence )); then
IsEscapedSequence=1
else
IsEscapedSequence=0
fi
;;
\#)
if (( ! IsEscapedSequence )); then
IgnoreLine=1
ConfigLine="${ConfigLine:0:i}"
break
else
IsEscapedSequence=0
fi
;;
*)
(( ! IsEscapedSequence )) || IsEscapedSequence=0
;;
esac
(( ! IgnoreLine )) || break
done
ConfigLine="${ConfigLine#"${ConfigLine%%[^[:space:]]*}"}" # remove beginning whitespaces
ConfigLine="${ConfigLine%"${ConfigLine##*[^[:space:]]}"}" # remove ending whitespaces
# remove last backslash
if (( ${#ConfigLine} )) && [[ "${ConfigLine:${#ConfigLine}-1:1}" == '\' ]]; then #'
ConfigLine="${ConfigLine:0:${#ConfigLine}-1}"
fi
if [[ -n "$ConfigLine" ]]; then
if (( DoEval )); then
EscapeString "$ConfigLine" '"' 0
eval ConfigLine=\"$RETURN_VALUE\"
fi
EscapeString "$ConfigLine" '' 1
ConfigLine="$RETURN_VALUE"
ConfigString="$ConfigString${ConfigString:+" "}'${ConfigLine}'"
fi
done
}
if [[ "$FilePath" != '-' ]]; then
InternalRead < "$FilePath"
else
InternalRead
fi
RETURN_VALUE="$ConfigString"
return 0
}
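# EXAMPLE (hedged sketch, hypothetical file and command; EscapeString comes from
# stringlib.sh included above): read command line arguments from a list file,
# one argument per line, where '#' starts a comment.
#
# printf '%s\n' '# extra flags' '--verbose' '--output=out dir' > my_flags.lst
# MakeCommandArgumentsFromFile my_flags.lst &&
# eval "ArgsArr=($RETURN_VALUE)" &&
# some_command "${ArgsArr[@]}" # some_command is a placeholder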
# Same as mkdir, but avoids mount points in the path to the directory
function MakeDir()
{
local ArgsArr
ArgsArr=("$@")
local DirPath
local i
# update all arguments which do not begin with the '-' character
for (( i=0; i<${#ArgsArr[@]}; i++ )); do
DirPath="${ArgsArr[i]}"
if [[ -n "$DirPath" && "${DirPath:0:1}" != '-' ]]; then
ExtractBackendPath "$DirPath"
ArgsArr[i]="$RETURN_VALUE"
#[[ ! -e "$RETURN_VALUE" ]] && echo "MKDIR: $RETURN_VALUE"
fi
done
# call mkdir with updated arguments list
mkdir "${ArgsArr[@]}"
}
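# EXAMPLE (hedged sketch, hypothetical path): same call signature as mkdir, but
# each path argument is resolved through ExtractBackendPath first, so nested
# directories are created under the real mount point target.
#
# MakeDir -p '/usr/local/install/my_app/bin'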
function CopyFiles()
{
local InputDirPath
local OutputDirPath
local NumArgs
local FromIndex
local CopyFlags="$1"
if [[ -n "$CopyFlags" && "${CopyFlags:0:1}" != '-' ]]; then
CopyFlags=''
else
shift
fi
InputDirPath="$1"
OutputDirPath="$2"
NumArgs="$#"
FromIndex=2
GetShiftOffset $(( FromIndex )) "$@" || shift $?
GetAbsolutePathFromDirPath "$InputDirPath"
InputDirPath="$RETURN_VALUE"
GetAbsolutePathFromDirPath "$OutputDirPath"
OutputDirPath="$RETURN_VALUE"
local LastError=0
local i
if (( FromIndex < NumArgs )); then
for (( i=FromIndex; i<NumArgs; i++ )); do
if [[ -n "$1" ]]; then
if [[ ! -e "$OutputDirPath" ]]; then
MakeDir -p "$OutputDirPath"
fi
cp $CopyFlags "$InputDirPath/$1" "$OutputDirPath/$1"
(( LastError |= $? ))
fi
shift
done
else
CopyDirImpl "$CopyFlags" "$InputDirPath" "$OutputDirPath"
(( LastError |= $? ))
fi
return $LastError
}
function CopyDirImpl()
{
local CopyFlags="$1"
local InputDirPath="$2"
local OutputDirPath="$3"
local ExtractedOutputDirPath
local file
local isVerbose=1
[[ "${CopyFlags//v/}" == "$CopyFlags" ]] && isVerbose=0
local isSilent=0
[[ "${CopyFlags//s/}" == "$CopyFlags" ]] && isSilent=1
(
trap "exit 254" INT # enable interruption while in loop
shopt -s dotglob # to enable file names beginning by a dot
pushd "$InputDirPath" >/dev/null 2>&1 && {
MakeDir -p "$OutputDirPath"
ExtractedOutputDirPath="$RETURN_VALUE"
if ! (( isSilent )); then
echo "\`$InputDirPath/' -> \`$ExtractedOutputDirPath/'"
fi
for file in *; do
cp "-R${CopyFlags#-}" "$InputDirPath/$file" "$ExtractedOutputDirPath" || exit $?
# if verbose, then do echo only for copied directories to reduce echo abuse
if (( isVerbose )) && [[ -d "$InputDirPath/$file" ]]; then
echo "\`$InputDirPath/$file/'"
fi
done
}
)
return $?
}
function CopyDir()
{
local CopyFlags="$1"
local InputDirPath
local OutputDirPath
if [[ -n "$CopyFlags" && "${CopyFlags:0:1}" != '-' ]]; then
CopyFlags=''
InputDirPath="$1"
OutputDirPath="$2"
else
InputDirPath="$2"
OutputDirPath="$3"
fi
GetAbsolutePathFromDirPath "$InputDirPath"
InputDirPath="$RETURN_VALUE"
GetAbsolutePathFromDirPath "$OutputDirPath"
OutputDirPath="$RETURN_VALUE"
CopyDirImpl "$CopyFlags" "$InputDirPath" "$OutputDirPath"
}
function MoveDirImpl()
{
local MoveFlags="$1"
local InputDirPath="$2"
local OutputDirPath="$3"
local ExtractedOutputDirPath
local file
local isVerbose=1
[[ "${MoveFlags//v/}" == "$MoveFlags" ]] && isVerbose=0
local isSilent=0
[[ "${MoveFlags//s/}" == "$MoveFlags" ]] && isSilent=1
(
trap "exit 254" INT # enable interruption while in loop
shopt -s dotglob # to enable file names beginning by a dot
pushd "$InputDirPath" >/dev/null 2>&1 && {
MakeDir -p "$OutputDirPath"
ExtractedOutputDirPath="$RETURN_VALUE"
if ! (( isSilent )); then
echo "\`$InputDirPath/' -> \`$ExtractedOutputDirPath/'"
fi
for file in *; do
mv "-${MoveFlags#-}" "$InputDirPath/$file" "$ExtractedOutputDirPath" || exit $?
# if verbose, then do echo only for copied directories to reduce echo abuse
if (( isVerbose )) && [[ -d "$InputDirPath/$file" ]]; then
echo "\`$InputDirPath/$file/'"
fi
done
}
)
return $?
}
function MoveDir()
{
local MoveFlags="$1"
local InputDirPath
local OutputDirPath
if [[ -n "$MoveFlags" && "${MoveFlags:0:1}" != '-' ]]; then
MoveFlags=''
InputDirPath="$1"
OutputDirPath="$2"
else
InputDirPath="$2"
OutputDirPath="$3"
fi
GetAbsolutePathFromDirPath "$InputDirPath"
InputDirPath="$RETURN_VALUE"
GetAbsolutePathFromDirPath "$OutputDirPath"
OutputDirPath="$RETURN_VALUE"
MoveDirImpl "$MoveFlags" "$InputDirPath" "$OutputDirPath"
}
function CleanupDir()
{
local CleanupFlags="$1"
local DirPath
if [[ -n "$CleanupFlags" && "${CleanupFlags:0:1}" != '-' ]]; then
CleanupFlags=''
DirPath="$1"
else
CleanupFlags="$1"
DirPath="$2"
shift
fi
shift
[[ -d "$DirPath" ]] || return 1
local FilesToRemoveArr
local DirsToRemoveArr
FilesToRemoveArr=()
DirsToRemoveArr=()
# implementation function which does not run in a child shell process and calls itself recursively
function CleanupDirImpl()
{
local BaseDirPath="$1"
shift
local FilePath
local arg
for arg in "$@"; do
if [[ -n "$arg" && "$arg" != '*' ]]; then
FilePath="$BaseDirPath/$arg"
#echo "$FilePath"
if [[ -f "$FilePath" || -h "$FilePath" ]]; then
FilesToRemoveArr[${#FilesToRemoveArr[@]}]="$FilePath"
#rm -f "$FilePath" || return 2
elif [[ -d "$FilePath" ]]; then
pushd "$arg" >/dev/null && \
{
CleanupDirImpl "$FilePath" * || return 1
DirsToRemoveArr[${#DirsToRemoveArr[@]}]="$FilePath"
popd >/dev/null
#rmdir "$FilePath" || return 3
}
fi
fi
done
return 0
}
# to automatically unroll a child process current directories stack
(
trap "exit 254" INT # enable interruption while in loop
shopt -s dotglob # to enable file names beginning by a dot
# ignore errors if directory doesn't exist, suppress stderr
pushd "$DirPath" >/dev/null 2>&1 && \
{
CleanupDirImpl "$PWD" * || exit 1
# do remove files at first
if (( ${#FilesToRemoveArr[@]} )); then
rm "-f${CleanupFlags#-}" "${FilesToRemoveArr[@]}" || exit 2
fi
if (( ${#DirsToRemoveArr[@]} )); then
rmdir $CleanupFlags "${DirsToRemoveArr[@]}" || exit 3
fi
}
exit 0
)
return $?
}
function TouchFiles()
{
local DirPath="${1:-.}"
shift
# implementation function which does not run in a child shell process and calls itself recursively
function TouchFilesImpl()
{
local BaseDirPath="$1"
shift
local FilePath
local arg
for arg in "$@"; do
if [[ -n "$arg" ]]; then
FilePath="$BaseDirPath/$arg"
if [[ -f "$FilePath" || -h "$FilePath" ]]; then
touch -a "$FilePath"
elif [[ -d "$FilePath" ]]; then
pushd "$arg" >/dev/null && \
{
TouchFilesImpl "$BaseDirPath" *
popd >/dev/null
}
fi
fi
done
return 0
}
# to automatically unroll the child process current directories stack
(
trap "exit 254" INT # enable interruption while in loop
shopt -s dotglob # to enable file names beginning by a dot
# ignore errors if directory doesn't exist, don't suppress stderr
pushd "$DirPath" >/dev/null && \
{
TouchFilesImpl "$PWD" "$@" || exit 1
}
exit 0
)
return $?
}
function ReadFileDependents()
{
# drop return value
RETURN_VALUE=()
local FilePath="$1"
[[ -f "$FilePath" ]] || return 1
# -1 - for debugging only
# 1 - use dumpbin.exe
# 2 - use objdump.exe
local DumpbType=2
ConvertBackendPathToNative "$FilePath"
(( ! $? )) || return 2
local NativeFilePath="$RETURN_VALUE"
# drop return value
RETURN_VALUE=()
function InternalDumpbinRead()
{
local StrLine
local DoReadDeps=0
local DoBreak=0
local HasDeps=0
local IFS='' # enables read whole string line into a single variable
while read -r StrLine; do
case "$StrLine" in
*'dependencies:')
DoReadDeps=1
;;
*)
if (( DoReadDeps )); then
if (( HasDeps )); then
if [[ -z "${StrLine//[[:space:]]/}" ]]; then
local DoBreak=1
break
fi
fi
if [[ -n "${StrLine//[[:space:]]/}" ]]; then
HasDeps=1
echo "${StrLine:4}"
fi
fi
;;
esac
(( DoBreak )) && break
done
}
function InternalObjdumpRead()
{
grep "DLL Name:" | \
{
local StrLine
local IFS='' # enables read whole string line into a single variable
while read -r StrLine; do
[[ -n "${StrLine//[[:space:]]/}" ]] && echo "${StrLine#*:[[:space:]]}"
done
}
}
function InternalSimpleRead()
{
local StrLine
local IFS='' # enables read whole string line into a single variable
while read -r StrLine; do
[[ -n "${StrLine//[[:space:]]/}" ]] && RETURN_VALUE[${#RETURN_VALUE[@]}]="$StrLine"
done
}
local EvalString=""
case "$DumpbType" in
1)
EvalString="$(
"$CONTOOLS_UTILITIES_BIN_ROOT/Microsoft/dumpbin.exe" -dependents "$NativeFilePath" | \
InternalDumpbinRead | tr '[:upper:]' '[:lower:]' | \
{ MakeCommandArgumentsFromFile - && echo -n "$RETURN_VALUE"; }
)"
[[ -n "$EvalString" ]] && eval "RETURN_VALUE=($EvalString)"
;;
2)
EvalString="$(
"$CONTOOLS_UTILITIES_BIN_ROOT/mingw/bin/objdump.exe" -p "$NativeFilePath" | \
InternalObjdumpRead | tr '[:upper:]' '[:lower:]' | \
{ MakeCommandArgumentsFromFile - && echo -n "$RETURN_VALUE"; }
)"
[[ -n "$EvalString" ]] && eval "RETURN_VALUE=($EvalString)"
;;
-1)
InternalSimpleRead < "$FilePath"
;;
esac
return 0
}
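# EXAMPLE (hedged sketch, hypothetical binary; assumes objdump.exe is reachable
# under "$CONTOOLS_UTILITIES_BIN_ROOT/mingw/bin" as used above):
#
# ReadFileDependents './myapp.exe' &&
# for dep in "${RETURN_VALUE[@]}"; do echo "depends on: $dep"; done
# # prints the lower-cased DLL names imported by the binary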
function GetFileDependantsByDirPath()
{
# drop return value
RETURN_VALUE=()
local DirPath="$1"
local Extensions="${2:-"exe dll so"}"
[[ -d "$DirPath" ]] || return 1
local FilePath
local ext
local IFS=$' \t' # enables string split only by non printable characters back
for ext in $Extensions; do
IFS=$'\r\n' # enables string split only by line return characters and non printable characters may become part of name
for FilePath in `find "$DirPath" -type f -iname "*.$ext" 2>/dev/null`; do
[[ -n "${FilePath//[[:space:]]/}" ]] && RETURN_VALUE[${#RETURN_VALUE[@]}]="$FilePath"
done
done
return 0
}
function DirectoryLsIterator()
{
function InternalRead()
{
local PredicateFunc="$1"
GetShiftOffset 1 "$@" || shift $?
#local DirPathFromRoot="$2"
#local NestingIndex="$3"
local LsLine=''
local IFS='' # enables read whole string line into a single variable
local LastError=0
while read -r LsLine; do
case "$LsLine" in
?[r\-][w\-][x\-][r\-][w\-][x\-][r\-][w\-][x\-][[:blank:]\+]*)
# MatchString function is slow for bash versions less than 3.2 because of external regexp emulation
if MatchString '' "$LsLine" '([^[:blank:]]+)[[:blank:]]+([^[:blank:]]+)?[[:blank:]]+([^[:blank:]]+)?[[:blank:]]+([^[:blank:]]+)?[[:blank:]]+([^[:blank:]]+)?[[:blank:]]+([^[:blank:]]+)?[[:blank:]]+([^[:blank:]]+)?[[:blank:]]+([^[:blank:]]+)?[[:blank:]]+(.+)'; then
"$PredicateFunc" "${#BASH_REMATCH[@]}" "${BASH_REMATCH[@]}" "$@"
LastError=$?
if (( LastError )); then # stop iterating
#echo "!!! $LastError" >&2
return $LastError
fi
fi
;;
*)
# echo "!!!$LsLine!!!"
;;
esac
done
return $LastError
}
# implementation function which does not run in a child shell process and calls itself recursively
function DirectoryLsIteratorImpl()
{
local DirPathFromRoot="$1"
local DirPath="$2"
local Flags="$3"
local NestingIndex="${4:-0}"
local PredicateFunc="$5"
GetShiftOffset 5 "$@" || shift $?
#echo "PWD=$PWD"
#echo "DirPathFromRoot=$DirPathFromRoot"
#echo "DirPath=$DirPath"
#echo "NestingIndex=$NestingIndex"
[[ -d "$DirPath" ]] || return 1
[[ -n "$PredicateFunc" ]] || return 2
local DoRecursion=0
[[ "${Flags//r/}" != "$Flags" ]] && DoRecursion=1
local LastError=0
local PipeErrorsArr
PipeErrorsArr=()
if (( ! DoRecursion )); then
# process target directory itself
#echo "1: DirPath=$DirPath"
ls -ld "$DirPath" | sort -f --key=1.1d,1 --key=9 | InternalRead "$PredicateFunc" "$DirPathFromRoot" "$NestingIndex"
LastError=$?
PipeErrorsArr=("${PIPESTATUS[@]}") # unknown bash bug: workaround for the "elif (( ${PIPESTATUS[0]:-0} ))" construction
if (( LastError )); then
LastError=5
elif (( ${PipeErrorsArr[0]:-0} )); then
LastError=6
elif (( ${PipeErrorsArr[1]:-0} )); then
LastError=7
elif (( ${PipeErrorsArr[2]:-0} )); then
LastError=8
fi
else
#echo "2: DirPath=$DirPath"
# process target directory content
pushd "$DirPath" 2>&1 >/dev/null && \
{
if (( ! $? )); then
ls -al | sort -f --key=1.1d,1 --key=9 | InternalRead "$PredicateFunc" "$DirPathFromRoot" "$NestingIndex"
LastError=$?
PipeErrorsArr=("${PIPESTATUS[@]}") # unknown bash bug: workaround for the "elif (( ${PIPESTATUS[0]:-0} ))" construction
if (( LastError )); then
LastError=11
elif (( ${PipeErrorsArr[0]:-0} )); then
LastError=12
elif (( ${PipeErrorsArr[1]:-0} )); then
LastError=13
elif (( ${PipeErrorsArr[2]:-0} )); then
LastError=14
fi
else
LastError=10
fi
popd 2>&1 >/dev/null
}
fi
return $LastError
}
# run recursion in a child process to later safely terminate it if something goes wrong
(
trap "exit 254" INT # enable interruption while in loop
DirectoryLsIteratorImpl "$@"
)
return $?
}
function PrintDirectoryPermissions()
{
local RootDirPath="${1:-.}"
function IteratorPredicateFunc1()
{
local ChmodPerms=''
local LsFileField=''
local RelativeFilePath=''
local Padding12=' '
local LastError=0
local IFS=$' \t\r\n' # workaround for the bug in the "[@]:i" expression under the bash version lower than 4.1
(( ${#@} >= 3 )) && ChmodPerms="$3"
(( ${#@} > 3 )) && LsFileField="${@:$1+1:1}"
local BasePath="${@:$1+2:1}"
local NestingIndex="${@:$1+3:1}"
# parse the ls output for the "<filelinkname>" from the file name field in the "<filelinkname> -> <filepath>" format
if (( ${#LsFileField} )); then
RelativeFilePath="${LsFileField%[[:blank:]]->[[:blank:]]*}"
fi
if (( NestingIndex )); then
# remove '/' prefix from the path if parent path was not absolute
if [[ "${RootDirPath:0:1}" != '/' ]]; then
BasePath="${BasePath#/}"
fi
# the "." and ".." subdirectories in the unix is a part of any directory, ignore them to avoid infinite recursion
if [[ "$RelativeFilePath" == '.' || "$RelativeFilePath" == '..' ]]; then
return 0
fi
fi
local ChmodPermsAligned=''
if (( ${#ChmodPerms} < 11 )); then
ChmodPermsAligned="${Padding12:0:11-${#ChmodPerms}}"
fi
if [[ "${ChmodPerms:0:1}" != 'd' ]]; then
echo "$ChmodPerms$ChmodPermsAligned $BasePath${BasePath:+/}$RelativeFilePath"
else
echo "$ChmodPerms$ChmodPermsAligned $BasePath${BasePath:+/}$RelativeFilePath/:"
# do parse directories recursively
if [[ "${RelativeFilePath:0:1}" != '/' ]]; then
DirectoryLsIteratorImpl "$BasePath${BasePath:+/}$RelativeFilePath" "./$RelativeFilePath" -r $((NestingIndex+1)) IteratorPredicateFunc1 "$@"
else
DirectoryLsIteratorImpl "$RelativeFilePath" "$RelativeFilePath" -r $((NestingIndex+1)) IteratorPredicateFunc1 "$@"
fi
LastError=$?
#echo "-- LastError=$LastError"
echo ''
fi
return $LastError
}
DirectoryLsIterator '' "$RootDirPath" '' 0 IteratorPredicateFunc1
}
| andry81/contools | Scripts/Tools/bash/filelib.sh | Shell | mit | 40,016 |
#!/bin/bash
create_conf_dir() {
mkdir -p ${SQUID_DIR}
if [ ! -d ${SQUID_DIR}/conf ]; then
mv /etc/squid ${SQUID_DIR}/conf
else
rm -rf /etc/squid
fi
ln -s ${SQUID_DIR}/conf /etc/squid
}
create_cache_dir() {
if [ ! -d ${SQUID_DIR}/cache ]; then
mv /var/spool/squid ${SQUID_DIR}/cache
else
rm -rf /var/spool/squid
fi
ln -s ${SQUID_DIR}/cache /var/spool/squid
}
create_log_dir() {
if [ ! -d ${SQUID_DIR}/log ]; then
mkdir -p ${SQUID_DIR}/log
chown ${SQUID_USER}: ${SQUID_DIR}/log
fi
rm -rf /var/log/squid
ln -s ${SQUID_DIR}/log /var/log/squid
}
create_conf_dir
create_cache_dir
create_log_dir
chown -R ${SQUID_USER}: ${SQUID_DIR}
rm -f /var/run/squid.pid
if [[ -z $1 ]]; then
if [ ! -d ${SQUID_DIR}/cache/00 ]; then
squid -d 1 -N -z
else
squid -d 1 -N
fi
else
exec "$@"
fi
| renebarbosafl/Docker-Squid | entrypoint.sh | Shell | mit | 807 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2754-1
#
# Security announcement date: 2015-10-05 00:00:00 UTC
# Script generation date: 2017-01-27 21:06:07 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fix on version:
# - thunderbird:1:38.3.0+build1-0ubuntu0.12.04.1
#
# Last versions recommended by security team:
# - thunderbird:1:45.7.0+build1-0ubuntu0.12.04.1
#
# CVE List:
# - CVE-2015-4500
# - CVE-2015-4506
# - CVE-2015-4509
# - CVE-2015-4511
# - CVE-2015-4517
# - CVE-2015-4521
# - CVE-2015-4522
# - CVE-2015-7174
# - CVE-2015-7175
# - CVE-2015-7176
# - CVE-2015-7177
# - CVE-2015-7180
# - CVE-2015-4519
# - CVE-2015-4520
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade thunderbird=1:45.7.0+build1-0ubuntu0.12.04.1 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_12.04_LTS/i386/2015/USN-2754-1.sh | Shell | mit | 937 |
#!/bin/bash
set +e
module="scull"
device="scull"
if grep -q '^staff:' /etc/group; then
group="staff"
else
group="wheel"
fi
# Remove the module if already existing
/sbin/rmmod $module
# Install the module
/sbin/insmod ./$module.ko $* || exit 1
major=$(awk -v module="$module" '{if ($2==module) {print $1}}' /proc/devices)
# Remove any device files already existing
rm -f /dev/${device}[0-3]
mknod /dev/${device}0 c $major 0
mknod /dev/${device}1 c $major 1
mknod /dev/${device}2 c $major 2
mknod /dev/${device}3 c $major 3
| ChidambaramR/scull | scull_load.sh | Shell | mit | 532 |
#!/bin/sh
TARGET=target_linux_fsl
rm -fr $TARGET
mkdir $TARGET
cd $TARGET
cmake ../../ -DCMAKE_TOOLCHAIN_FILE=../linux_arm-fsl.cmake
make --trace
ls -l -h ../../target/
| jxfengzi/tiny | cmake/build_linux_arm_fsl.sh | Shell | mit | 170 |
#!/bin/bash
ssh -t [email protected] 'sudo sh ~/go/src/github.com/phzfi/RIC/scripts/clear_cache.sh'
ssh [email protected] 'sh ~/go/src/github.com/phzfi/RIC/scripts/start_thumbor_stop_rest.sh'
echo "Waiting 10s for thumbor to boot"
sleep 10s
| phzfi/RIC | report_generator/start_thumbor_stop_rest.sh | Shell | mit | 239 |
#! /bin/sh
# This is an install script; run it with ./xxx.sh or nohup xxx.sh &
#set trap
trap 'install finish' 0
trap 'install interapted' 2
#install essential-tools
apt-get update
apt-get install -y build-essential emacs24 vim
apt-get install -y git curl
#install docker
curl -sSL https://get.docker.com/ | sh
#docker pull baseimages
#docker pull fviaatt/securitylearn
#docker run -i -t -p 2222:22 fviaatt/securitylearn /bin/bash
#build Docker container
#mkdir -p ~/work/controll
#cp Dockerfile ~/work/controll/Dockerfile
#cp 000-default.conf ~/work/controll/000-default.conf
#wget https://gist.githubusercontent.com/fvi-att/65854df646be14c29ffa/raw/531f24982f060c267081e64c1d46058c72208385/Dockerfile -O ~/work/controll/Dockerfile
# If you use Vagrant, enable this line.
cd /vagrant/
docker build -t main_app .
#sleep for ready..
sleep 15s
#container launch settings
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p tcp --dport 2222 -j ACCEPT
#RUN DATABASE CONTAINER
docker run -d -p 3306:3306 -e MYSQL_ROOT_PASSWORD=admin --name mariadb mariadb:latest
#RUN APP CONTAINER
docker run -itd --name main_app -p 2222:22 -p 80:80 -v /tmp/:/tmp/ -v /var/www/ --link mariadb:mariadb main_app
#docker exec $CONTROLL_CONTAINER ip addr add 172.17.0.10/16 dev eth0
docker exec -it main_app /bin/bash
#end trap
trap 0
trap 2
# == END ==
| fvi-att/SecuritylearnSystemLauncher | Build.sh | Shell | mit | 1,356 |
#!/bin/bash
java -jar /workspace/sys-stack-serv-customer/target/serv-customer-0.0.1-SNAPSHOT.war
| gspanoae/sys-stack-serv-customer | docker-entrypoint.sh | Shell | mit | 97 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
else
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries
local basename
basename="$(basename "$1" | sed -E s/\\..+// && exit ${PIPESTATUS[0]})"
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/${basename}.framework/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework 'Pods/Alamofire.framework'
install_framework 'Pods/Bolts.framework'
install_framework 'Pods/Parse.framework'
install_framework 'Pods/ParseUI.framework'
install_framework 'Pods/RSBarcodes_Swift.framework'
install_framework 'Pods/SwiftyJSON.framework'
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework 'Pods/Alamofire.framework'
install_framework 'Pods/Bolts.framework'
install_framework 'Pods/Parse.framework'
install_framework 'Pods/ParseUI.framework'
install_framework 'Pods/RSBarcodes_Swift.framework'
install_framework 'Pods/SwiftyJSON.framework'
fi
| naddison36/book-review-ios | Pods/Target Support Files/Pods/Pods-frameworks.sh | Shell | mit | 3,042 |
#!/usr/bin/env bash
# Verify that no non-test files import Ginkgo or Gomega.
set -e
HAS_TESTING=false
cd ..
for f in $(find . -name "*.go" ! -name "*_test.go" ! -name "tools.go"); do
if grep -q "github.com/onsi/ginkgo" $f; then
echo "$f imports github.com/onsi/ginkgo"
HAS_TESTING=true
fi
if grep -q "github.com/onsi/gomega" $f; then
echo "$f imports github.com/onsi/gomega"
HAS_TESTING=true
fi
done
if "$HAS_TESTING"; then
exit 1
fi
exit 0
| lucas-clemente/quic-go | .github/workflows/no_ginkgo.sh | Shell | mit | 467 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2594-1
#
# Security announcement date: 2015-05-05 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:31 UTC
#
# Operating System: Ubuntu 15.04
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - clamav:0.98.7+dfsg-0ubuntu0.15.04.1
#
# Last versions recommended by security team:
# - clamav:0.98.7+dfsg-0ubuntu0.15.04.1
#
# CVE List:
# - CVE-2015-2170
# - CVE-2015-2221
# - CVE-2015-2222
# - CVE-2015-2305
# - CVE-2015-2668
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade clamav=0.98.7+dfsg-0ubuntu0.15.04.1 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_15.04/x86_64/2015/USN-2594-1.sh | Shell | mit | 728 |
#!/bin/bash
cd "$(dirname "$BASH_SOURCE")" \
&& source 'utils.sh'
declare -a FILES_TO_SYMLINK=(
'shell/bash_aliases'
'shell/bash_autocomplete'
'shell/bash_exports'
'shell/bash_functions'
'shell/bash_logout'
'shell/bash_options'
'shell/bash_profile'
'shell/bash_prompt'
'shell/bashrc'
'shell/curlrc'
'shell/inputrc'
'shell/vimrc'
'shell/screenrc'
'shell/bash_git'
'git/gitattributes'
'git/gitconfig'
'git/gitignore'
# 'vim/vim'
# 'vim/vimrc'
# 'vim/gvimrc'
)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
main() {
local i=''
local sourceFile=''
local targetFile=''
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
for i in ${FILES_TO_SYMLINK[@]}; do
sourceFile="$(cd .. && pwd)/$i"
targetFile="$HOME/.$(printf "%s" "$i" | sed "s/.*\/\(.*\)/\1/g")"
if [ ! -e "$targetFile" ]; then
execute "ln -fs $sourceFile $targetFile" "$targetFile → $sourceFile"
elif [ "$(readlink "$targetFile")" == "$sourceFile" ]; then
print_success "$targetFile → $sourceFile"
else
ask_for_confirmation "'$targetFile' already exists, do you want to overwrite it?"
if answer_is_yes; then
rm -rf "$targetFile"
execute "ln -fs $sourceFile $targetFile" "$targetFile → $sourceFile"
else
print_error "$targetFile → $sourceFile"
fi
fi
done
}
main
| danmelton/dotfiles | os/create_symbolic_links.sh | Shell | mit | 1,557 |
#!/bin/bash
# Basics
sudo yum install -y git
sudo yum install -y emacs
# Node
sudo curl --silent --location https://rpm.nodesource.com/setup_5.x | bash -
sudo yum install -y nodejs
# NPM
npm install -g bower
npm install -g grunt-cli
# npm install -g gulp ???
npm install
node server.js
| cshey15/ProGear | setup.sh | Shell | mit | 292 |
#!/bin/sh
xrandr --output DVI-1 --auto --rotate right
xrandr --output DVI-0 --auto --primary --right-of DVI-1
xrandr --output DisplayPort-0 --auto --right-of DVI-0
| ErikBjare/dotfiles | home/.xrandr/erb-main-kubuntu.sh | Shell | mit | 166 |
#!/usr/bin/env bash
echo "Undo hello world"
| moee/shellprovisioner | tasks/samples/helloworld/undo.sh | Shell | mit | 45 |
#!/usr/bin/env bash
prefetchExtensionZip() {
declare publisher="${1?}"
declare name="${2?}"
declare version="${3?}"
1>&2 echo
1>&2 echo "------------- Downloading extension ---------------"
declare extZipStoreName="${publisher}-${name}.zip"
declare extUrl="https://${publisher}.gallery.vsassets.io/_apis/public/gallery/publisher/${publisher}/extension/${name}/${version}/assetbyname/Microsoft.VisualStudio.Services.VSIXPackage";
1>&2 echo "extUrl='$extUrl'"
declare nixPrefetchArgs=( --name "$extZipStoreName" --print-path "$extUrl" )
1>&2 printf "$ nix-prefetch-url"
1>&2 printf " %q" "${nixPrefetchArgs[@]}"
1>&2 printf " 2> /dev/null\n"
declare zipShaWStorePath
zipShaWStorePath=$(nix-prefetch-url "${nixPrefetchArgs[@]}" 2> /dev/null)
1>&2 echo "zipShaWStorePath='$zipShaWStorePath'"
echo "$zipShaWStorePath"
}
prefetchExtensionUnpacked() {
declare publisher="${1?}"
declare name="${2?}"
declare version="${3?}"
declare zipShaWStorePath
zipShaWStorePath="$(prefetchExtensionZip "$publisher" "$name" "$version")"
declare zipStorePath
zipStorePath="$(echo "$zipShaWStorePath" | tail -n1)"
1>&2 echo "zipStorePath='$zipStorePath'"
function rm_tmpdir() {
1>&2 printf "rm -rf -- %q\n" "$tmpDir"
rm -rf -- "$tmpDir"
unset tmpDir
trap - INT TERM HUP EXIT
}
function make_trapped_tmpdir() {
tmpDir=$(mktemp -d)
trap rm_tmpdir INT TERM HUP EXIT
}
1>&2 echo
1>&2 echo "------------- Unpacking extension ---------------"
make_trapped_tmpdir
declare unzipArgs=( -q -d "$tmpDir" "$zipStorePath" )
1>&2 printf "$ unzip"
1>&2 printf " %q" "${unzipArgs[@]}"
1>&2 printf "\n"
unzip "${unzipArgs[@]}"
declare unpackedStoreName="${publisher}-${name}"
declare unpackedStorePath
unpackedStorePath="$(nix add-to-store -n "$unpackedStoreName" "$tmpDir")"
declare unpackedSha256
unpackedSha256="$(nix hash-path --base32 --type sha256 "$unpackedStorePath")"
1>&2 echo "unpackedStorePath='$unpackedStorePath'"
1>&2 echo "unpackedSha256='$unpackedSha256'"
rm_tmpdir
echo "$unpackedSha256"
echo "$unpackedStorePath"
}
prefetchExtensionJson() {
declare publisher="${1?}"
declare name="${2?}"
declare version="${3?}"
declare unpackedShaWStorePath
unpackedShaWStorePath="$(prefetchExtensionUnpacked "$publisher" "$name" "$version")"
declare unpackedStorePath
unpackedStorePath="$(echo "$unpackedShaWStorePath" | tail -n1)"
1>&2 echo "unpackedStorePath='$unpackedStorePath'"
declare jsonShaWStorePath
jsonShaWStorePath=$(nix-prefetch-url --print-path "file://${unpackedStorePath}/extension/package.json" 2> /dev/null)
1>&2 echo "jsonShaWStorePath='$jsonShaWStorePath'"
echo "$jsonShaWStorePath"
}
formatExtRuntimeDeps() {
declare publisher="${1?}"
declare name="${2?}"
declare version="${3?}"
declare jsonShaWStorePath
jsonShaWStorePath="$(prefetchExtensionJson "$publisher" "$name" "$version")"
declare jsonStorePath
jsonStorePath="$(echo "$jsonShaWStorePath" | tail -n1)"
1>&2 echo "jsonStorePath='$jsonStorePath'"
# Assume packages without an architecture are for x86_64 and remap arm64 to aarch64.
declare jqQuery
jqQuery=$(cat <<'EOF'
.runtimeDependencies
| map(select(.platforms[] | in({"linux": null, "darwin": null})))
| map(select(.architectures == null).architectures |= ["x86_64"])
| map(del(.architectures[] | select(. | in({"x86_64": null, "arm64": null}) | not)))
| map((.architectures[] | select(. == "arm64")) |= "aarch64")
| map(select(.architectures != []))
| .[] | {
(.id + "__" + (.architectures[0]) + "-" + (.platforms[0])):
{installPath, binaries, urls: [.url, .fallbackUrl] | map(select(. != null))}
}
EOF
)
1>&2 printf "$ cat %q | jq '%s'\n" "$jsonStorePath" "$jqQuery"
cat "$jsonStorePath" | jq "$jqQuery"
}
computeExtRtDepChecksum() {
declare rtDepJsonObject="${1?}"
declare url
url="$(echo "$rtDepJsonObject" | jq -j '.[].urls[0]')"
declare sha256
1>&2 printf "$ nix-prefetch-url '%s'\n" "$url"
sha256="$(nix-prefetch-url "$url")"
1>&2 echo "$sha256"
echo "$sha256"
}
computeAndAttachExtRtDepsChecksums() {
while read -r rtDepJsonObject; do
declare sha256
sha256="$(computeExtRtDepChecksum "$rtDepJsonObject")"
echo "$rtDepJsonObject" | jq --arg sha256 "$sha256" '.[].sha256 = $sha256'
done < <(cat - | jq -c '.')
}
jqStreamToJson() {
cat - | jq --slurp '. | add'
}
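# Hedged usage sketch (the extension coordinates below are placeholders, not a
# verified publisher/name/version):
#
# formatExtRuntimeDeps "ms-vscode" "cpptools" "1.2.3" \
#   | computeAndAttachExtRtDepsChecksums \
#   | jqStreamToJson
#
# This prints one merged JSON object keyed by "<id>__<arch>-<platform>", with a
# "sha256" attribute attached to each runtime dependency by nix-prefetch-url.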
jsonToNix() {
# TODO: Replacing this non functional stuff with a proper json to nix
# implementation would allow us to produce a 'rt-deps-bin-srcs.nix' file instead.
false
cat - | sed -E -e 's/": /" = /g' -e 's/,$/;/g' -e 's/ }$/ };/g' -e 's/ ]$/ ];/g'
}
| NixOS/nixpkgs | pkgs/applications/editors/vscode/extensions/_maintainers/update-bin-srcs-lib.sh | Shell | mit | 4,677 |
#!/bin/bash
pushd site/
python -m SimpleHTTPServer 9001
popd
| lukevenediger/jsinsa-firebase-2015 | serve.sh | Shell | mit | 61 |
#!/bin/bash
if [ $# -ne 1 ]
then
echo "Usage: $0 OUTPUTDIR"
exit 1
fi
find $1 -iname '*.gz' -delete
find $1 -iname '*.br' -delete
find $1 -iname '*.html' | xargs sed -i 's|https://kura.io|http://omgkuraio276g5wo.onion|g'
find $1 -iname '*.js' | xargs sed -i 's|https://kura.io|http://omgkuraio276g5wo.onion|g'
find $1 -iname '*.css' | xargs sed -i 's|https://kura.io|http://omgkuraio276g5wo.onion|g'
find $1 -iname '*.map' | xargs sed -i 's|https://kura.io|http://omgkuraio276g5wo.onion|g'
find $1 -iname '*.xml' | xargs sed -i 's|https://kura.io|http://omgkuraio276g5wo.onion|g'
| kura/kura.io | scripts/tor.sh | Shell | mit | 591 |
#!/bin/bash
if ! patch -R -p1 --dry-run <$1; then
patch -p1 <$1
fi
| roscopecoltran/scraper | shared/docker/templates/deepdetect/patches/test_patch.sh | Shell | mit | 69 |
#!/bin/sh
BIN=bin
${BIN}/pair node0 &
${BIN}/pair node1 &
sleep 3
killall pair
| begeekmyfriend/nanomsg-tutorial | pair.sh | Shell | cc0-1.0 | 82 |
#!/usr/bin/env bash
for f in required/*.json; do ./node_modules/.bin/jsonlint -q $f; done
for f in optional/*.json; do ./node_modules/.bin/jsonlint -q $f; done
| cmderdev/vendors | testjson.sh | Shell | cc0-1.0 | 160 |
#!/bin/bash
export LEIN="/usr/local/bin/lein"
export APP_NAME="etcd-experiment"
export APP_DIR="/var/www/${APP_NAME}/current"
export APP_ENV="production"
export APP_SHARED_DIR="/var/www/${APP_NAME}/shared"
cd $APP_DIR && java -jar $APP_DIR/target/${APP_NAME}-0.1.0-SNAPSHOT-standalone.jar >> $APP_SHARED_DIR/log/${APP_NAME}.log 2>&1
| uswitch/etcd-experiment | bin/run.sh | Shell | epl-1.0 | 334 |
#!/bin/sh
# Copyright (c) 2012-2016 Codenvy, S.A., Red Hat, Inc
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Mario Loriedo - Initial implementation
#
usage () {
printf "%s" "${USAGE}"
}
info() {
printf "${GREEN}INFO:${NC} %s\n" "${1}"
}
debug() {
printf "${BLUE}DEBUG:${NC} %s\n" "${1}"
}
error() {
printf "${RED}ERROR:${NC} %s\n" "${1}"
}
error_exit() {
echo "---------------------------------------"
error "!!!"
error " ${1}"
error "!!!"
echo "---------------------------------------"
exit 1
}
convert_windows_to_posix() {
# "/some/path" => /some/path
OUTPUT_PATH=${1//\"}
echo "/"$(echo "$OUTPUT_PATH" | sed 's/\\/\//g' | sed 's/://')
}
get_clean_path() {
INPUT_PATH=$1
# \some\path => /some/path
OUTPUT_PATH=$(echo ${INPUT_PATH} | tr '\\' '/')
# /somepath/ => /somepath
OUTPUT_PATH=${OUTPUT_PATH%/}
# /some//path => /some/path
OUTPUT_PATH=$(echo ${OUTPUT_PATH} | tr -s '/')
# "/some/path" => /some/path
OUTPUT_PATH=${OUTPUT_PATH//\"}
echo ${OUTPUT_PATH}
}
get_converted_and_clean_path() {
CONVERTED_PATH=$(convert_windows_to_posix "${1}")
CLEAN_PATH=$(get_clean_path "${CONVERTED_PATH}")
echo $CLEAN_PATH
}
get_che_launcher_container_id() {
hostname
}
get_che_launcher_version() {
get_che_image_version ${LAUNCHER_IMAGE_VERSION}
}
is_boot2docker() {
if uname -r | grep -q 'boot2docker'; then
return 0
else
return 1
fi
}
has_docker_for_windows_ip() {
if [ "${ETH0_ADDRESS}" = "10.0.75.2" ]; then
return 0
else
return 1
fi
}
is_docker_for_mac() {
if uname -r | grep -q 'moby' && ! has_docker_for_windows_ip; then
return 0
else
return 1
fi
}
is_docker_for_windows() {
if uname -r | grep -q 'moby' && has_docker_for_windows_ip; then
return 0
else
return 1
fi
}
docker_run() {
ENV_FILE=$(get_list_of_che_system_environment_variables)
docker run -d --name "${CHE_SERVER_CONTAINER_NAME}" \
-v /var/run/docker.sock:/var/run/docker.sock:Z \
-v "$CHE_DATA_LOCATION" \
-p "${CHE_PORT}":"${CHE_PORT}" \
--restart="${CHE_RESTART_POLICY}" \
-e "CHE_LOG_LEVEL=${CHE_LOG_LEVEL}" \
-e "CHE_IP=$CHE_HOST_IP" \
--env-file=$ENV_FILE \
"$@"
rm -rf $ENV_FILE > /dev/null
}
get_user_id() {
CHE_USER_UID=$(docker run -t \
-v /etc/passwd:/etc/passwd:ro,Z \
-v /etc/group:/etc/group:ro,Z \
alpine id -u ${CHE_USER})
CHE_USER_GID=$(docker run -t \
-v /etc/passwd:/etc/passwd:ro,Z \
-v /etc/group:/etc/group:ro,Z \
alpine getent group docker | cut -d: -f3)
echo -n "${CHE_USER_UID}" | tr '\r' ':'; echo -n ${CHE_USER_GID}
}
docker_run_with_che_user() {
if [ "${CHE_USER}" != "root" ]; then
docker_run -e CHE_USER=${CHE_USER} \
-v /etc/group:/etc/group:ro,Z \
-v /etc/passwd:/etc/passwd:ro,Z \
--user=$(get_user_id) \
"$@"
else
docker_run --user="${CHE_USER}" "$@"
fi
}
docker_run_if_in_vm() {
# If the container will run inside of a VM, additional parameters must be set.
# Setting CHE_IN_VM=true will have the che-server container set the values.
if is_docker_for_mac || is_docker_for_windows || is_boot2docker; then
docker_run_with_che_user -e "CHE_IN_VM=true" "$@"
else
docker_run_with_che_user "$@"
fi
}
docker_run_with_assembly() {
if has_assembly; then
docker_run_if_in_vm -v "$CHE_ASSEMBLY_LOCATION" -e "CHE_ASSEMBLY=${CHE_ASSEMBLY}" "$@"
else
docker_run_if_in_vm "$@"
fi
}
docker_run_with_conf() {
if has_che_conf_path; then
docker_run_with_assembly -v "$CHE_CONF_LOCATION" -e "CHE_LOCAL_CONF_DIR=${CHE_CONF}" "$@"
else
docker_run_with_assembly "$@"
fi
}
docker_run_with_external_hostname() {
if has_external_hostname; then
docker_run_with_conf -e "CHE_DOCKER_MACHINE_HOST_EXTERNAL=${CHE_DOCKER_MACHINE_HOST_EXTERNAL}" "$@"
else
docker_run_with_conf "$@"
fi
}
docker_run_with_debug() {
if has_debug && has_debug_suspend; then
docker_run_with_external_hostname -p "${CHE_DEBUG_SERVER_PORT}":8000 \
-e "CHE_DEBUG_SERVER=true" \
-e "JPDA_SUSPEND=y" "$@"
elif has_debug; then
docker_run_with_external_hostname -p "${CHE_DEBUG_SERVER_PORT}":8000 \
-e "CHE_DEBUG_SERVER=true" "$@"
else
docker_run_with_external_hostname "$@"
fi
}
has_debug_suspend() {
if [ "${CHE_DEBUG_SERVER_SUSPEND}" = "false" ]; then
return 1
else
return 0
fi
}
has_debug() {
if [ "${CHE_DEBUG_SERVER}" = "false" ]; then
return 1
else
return 0
fi
}
has_che_conf_path() {
if [ "${CHE_CONF}" = "" ]; then
return 1
else
return 0
fi
}
has_assembly() {
if [ "${CHE_ASSEMBLY}" = "" ]; then
return 1
else
return 0
fi
}
has_external_hostname() {
if [ "${CHE_DOCKER_MACHINE_HOST_EXTERNAL}" = "" ]; then
return 1
else
return 0
fi
}
get_list_of_che_system_environment_variables() {
# See: http://stackoverflow.com/questions/4128235/what-is-the-exact-meaning-of-ifs-n
IFS=$'\n'
DOCKER_ENV=$(mktemp)
# First grab all known CHE_ variables
CHE_VARIABLES=$(env | grep CHE_)
for SINGLE_VARIABLE in "${CHE_VARIABLES}"; do
echo "${SINGLE_VARIABLE}" >> $DOCKER_ENV
done
# Add in known proxy variables
if [ ! -z ${http_proxy+x} ]; then
echo "http_proxy=${http_proxy}" >> $DOCKER_ENV
fi
if [ ! -z ${https_proxy+x} ]; then
echo "https_proxy=${https_proxy}" >> $DOCKER_ENV
fi
if [ ! -z ${no_proxy+x} ]; then
echo "no_proxy=${no_proxy}" >> $DOCKER_ENV
fi
echo $DOCKER_ENV
}
get_docker_install_type() {
if is_boot2docker; then
echo "boot2docker"
elif is_docker_for_windows; then
echo "docker4windows"
elif is_docker_for_mac; then
echo "docker4mac"
else
echo "native"
fi
}
get_docker_host_ip() {
case $(get_docker_install_type) in
boot2docker)
echo $ETH1_ADDRESS
;;
native)
echo $DOCKER0_ADDRESS
;;
*)
echo $ETH0_ADDRESS
;;
esac
}
get_docker_host_os() {
docker info | grep "Operating System:" | sed "s/^Operating System: //"
}
get_docker_daemon_version() {
docker version --format '{{.Server.Version}}' | grep "1\.[0-9]*\.[0-9]*"
}
get_che_hostname() {
INSTALL_TYPE=$(get_docker_install_type)
if [ "${INSTALL_TYPE}" = "boot2docker" ]; then
echo $DEFAULT_DOCKER_HOST_IP
else
echo "localhost"
fi
}
check_docker() {
if [ ! -S /var/run/docker.sock ]; then
error_exit "Docker socket (/var/run/docker.sock) hasn't be mounted \
inside the container. Verify the syntax of the \"docker run\" command."
fi
if ! docker ps > /dev/null 2>&1; then
output=$(docker ps)
error_exit "Error when running \"docker ps\": ${output}"
fi
}
che_container_exist_by_name() {
docker inspect ${1} > /dev/null 2>&1
if [ "$?" == "0" ]; then
return 0
else
return 1
fi
}
che_container_exist() {
if [ "$(docker ps -aq -f "id=${1}" | wc -l)" -eq 0 ]; then
return 1
else
return 0
fi
}
che_container_is_running() {
if [ "$(docker ps -qa -f "status=running" -f "id=${1}" | wc -l)" -eq 0 ]; then
return 1
else
return 0
fi
}
che_container_is_stopped() {
if [ "$(docker ps -qa -f "status=exited" -f "name=${1}" | wc -l)" -eq 0 ]; then
return 1
else
return 0
fi
}
contains() {
string="$1"
substring="$2"
if test "${string#*$substring}" != "$string"
then
return 0 # $substring is in $string
else
return 1 # $substring is not in $string
fi
}
has_container_debug () {
if contains "$(get_container_debug $1)" "<nil>"; then
return 1
else
return 0
fi
}
get_container_debug() {
CURRENT_CHE_DEBUG=$(docker inspect --format='{{.NetworkSettings.Ports}}' ${1})
IFS=$' '
for SINGLE_BIND in $CURRENT_CHE_DEBUG; do
case $SINGLE_BIND in
*8000/tcp:*)
echo $SINGLE_BIND | cut -f2 -d":"
;;
*)
;;
esac
done
}
get_che_container_host_ip_from_container() {
BINDS=$(docker inspect --format="{{.Config.Env}}" "${1}" | cut -d '[' -f 2 | cut -d ']' -f 1)
IFS=$' '
for SINGLE_BIND in $BINDS; do
case $SINGLE_BIND in
*CHE_IP*)
echo $SINGLE_BIND | cut -f2 -d=
;;
*)
;;
esac
done
}
get_che_container_host_bind_folder() {
BINDS=$(docker inspect --format="{{.HostConfig.Binds}}" "${2}" | cut -d '[' -f 2 | cut -d ']' -f 1)
IFS=$' '
for SINGLE_BIND in $BINDS; do
case $SINGLE_BIND in
*$1*)
echo $SINGLE_BIND | cut -f1 -d":"
;;
*)
;;
esac
done
}
get_che_container_conf_folder() {
FOLDER=$(get_che_container_host_bind_folder "/conf:Z" $1)
echo "${FOLDER:=not set}"
}
get_che_container_data_folder() {
FOLDER=$(get_che_container_host_bind_folder "/home/user/che/workspaces:Z" $1)
echo "${FOLDER:=not set}"
}
get_che_container_image_name() {
docker inspect --format="{{.Config.Image}}" "${1}"
}
get_che_image_version() {
image_version=$(echo ${1} | cut -d : -f2 -s)
if [ -n "${image_version}" ]; then
echo "${image_version}"
else
echo "latest"
fi
}
get_che_server_container_id() {
docker inspect -f '{{.Id}}' ${1}
}
get_docker_external_hostname() {
if is_docker_for_mac || is_docker_for_windows; then
echo "localhost"
else
echo ""
fi
}
wait_until_container_is_running() {
CONTAINER_START_TIMEOUT=${1}
ELAPSED=0
until che_container_is_running ${2} || [ ${ELAPSED} -eq "${CONTAINER_START_TIMEOUT}" ]; do
sleep 1
ELAPSED=$((ELAPSED+1))
done
}
wait_until_container_is_stopped() {
CONTAINER_STOP_TIMEOUT=${1}
ELAPSED=0
until che_container_is_stopped ${2} || [ ${ELAPSED} -eq "${CONTAINER_STOP_TIMEOUT}" ]; do
sleep 1
ELAPSED=$((ELAPSED+1))
done
}
server_is_booted() {
HTTP_STATUS_CODE=$(curl -I http://$(docker inspect -f '{{.NetworkSettings.IPAddress}}' "${1}"):$CHE_PORT/api/ \
-s -o /dev/null --write-out "%{http_code}")
if [ "${HTTP_STATUS_CODE}" = "200" ]; then
return 0
else
return 1
fi
}
get_server_version() {
HTTP_STATUS_CODE=$(curl -X OPTIONS http://$(docker inspect -f '{{.NetworkSettings.IPAddress}}' \
"${1}"):$CHE_PORT/api/ -s)
FIRST=${HTTP_STATUS_CODE//\ /}
IFS=','
for SINGLE_BIND in $FIRST; do
case $SINGLE_BIND in
*implementationVersion*)
echo ${SINGLE_BIND//\"} | cut -f2 -d":" | cut -f1 -d"}"
;;
*)
;;
esac
done
}
wait_until_server_is_booted () {
SERVER_BOOT_TIMEOUT=${1}
ELAPSED=0
until server_is_booted ${2} || [ ${ELAPSED} -eq "${SERVER_BOOT_TIMEOUT}" ]; do
sleep 1
ELAPSED=$((ELAPSED+1))
done
}
execute_command_with_progress() {
progress=$1
command=$2
shift 2
pid=""
case "$progress" in
extended)
$command "$@"
;;
basic|*)
$command "$@" &>/dev/null &
pid=$!
while kill -0 "$pid" >/dev/null 2>&1; do
printf "#"
sleep 10
done
wait $pid # return pid's exit code
printf "\n"
;;
esac
printf "\n"
}
|
gazarenkov/che-sketch
|
dockerfiles/launcher/launcher_funcs.sh
|
Shell
|
epl-1.0
| 11,269 |
#!/bin/bash
## check-submodules.sh
##
##
## # Usage
##
## This script represents an 'update' hook for Git.
##
##
## ## Installation
##
## This script should be installed by the developer, such that the
## file would be installed with 'exec' permissions, to a file
##
## ${SOURCE_TREE}/.git/hooks/update
##
## within a ${SOURCE_TREE} of a project, on each developer's local
## filesystem.
##
## Alternately, as in situations requiring multiple update checks,
## this script may be called from the source tree's `update` hook
## script, as such.
##
##
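## A minimal sketch of that installation, assuming this file is saved as
## check-submodules.sh next to the ${SOURCE_TREE} checkout:
##
##     cp check-submodules.sh ${SOURCE_TREE}/.git/hooks/update
##     chmod +x ${SOURCE_TREE}/.git/hooks/update
##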
## ## Usage Notes
##
## From the original author:
##
## "Known caveat is that the '0000...' special case is not handled."
##
##
## # Purpose
##
## This script endeavors to check for unpublished changes in Git
## submodules, to prevent synchronization failure in projects using
## Git submodules.
##
##
## # See also
##
## * _"Pro Git"_
##
## * Section 6.6, _Git Tools - Submodules_,
## Available http://git-scm.com/book/en/Git-Tools-Submodules
##
## * Section 7.3, _Customizing Git - Git Hooks_,
## Available http://git-scm.com/book/en/Customizing-Git-Git-Hooks
##
## * Manual page, run-parts(8)
##
##
## # Metadata
##
## **Origin:** Refer to https://diigo.com/01e9eh
## **License:** Unspecified (assumed: Public Domain)
## **Editor:** Sean Champ <spchamp+gproj _@_ me.com>
## **Timestamp:** 8 December, 2013
# # Safety checks
# ## Command Line Checks
#
# From default ${SOURCE_TREE}/.git/hooks/update.sample
if [ -z "$GIT_DIR" ]; then
echo "Don't run this script from the command line." >&2
echo " (if you want, you could supply GIT_DIR then run" >&2
echo " $0 <ref> <oldrev> <newrev>)" >&2
exit 1
fi
if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then
echo "usage: $0 <ref> <oldrev> <newrev>" >&2
exit 1
fi
# # Main Code
#
# The following is originally cf. https://diigo.com/01e9eh
REF=$1
OLD=$2
NEW=$3
# This update hook is based on the following information:
# http://stackoverflow.com/questions/3418674/bash-shell-script-function-to-verify-git-tag-or-commit-exists-and-has-been-pushe
# Get a list of submodules
git config --file <(git show $NEW:.gitmodules) \
--get-regexp 'submodule..*.path' |
while read key path
do
url=$(git config --file <(git show $NEW:.gitmodules) \
--get "${key/.path/.url}")
git diff "$OLD..$NEW" -- "$path" |
grep -e '^+Subproject commit ' |
cut -f3 -d ' ' |
while read new_rev
do
LINES=$(GIT_DIR="$url" git branch --quiet \
--contains "$new_rev" 2>/dev/null |
wc -l)
if [ "$LINES" -eq 0 ]
then
echo "Commit $new_rev not found in submodule $path ($url)" >&2
echo "Please push that submodule first" >&2
exit 1
fi
done || exit 1
done || exit 1
exit 0
|
GazeboHub/gproj-project-manage
|
src/main/resources/git/hooks/update.sh
|
Shell
|
epl-1.0
| 3,056 |
#! /bin/sh
#Test dump feature WS IN (default port) -> FILE OUT
# Stores values from 1 to 100 into the cache, with a 16.6 ms pause between two requests
# (to simulate 60 req/s from a game)
echo "*Flushing redis..."
/home/avsp/applications/redis-2.6.14/src/redis-cli "FLUSHDB"
echo
echo "*Starting dump..."
curl -X GET http://localhost:4560/controller/fileOutConnector/start/APP_TEST
echo
echo "*Go..."
for i in `seq 1 100`
do
curl -X POST -d "" http://localhost:4567/collector/APP_TEST/CTX1/NAT1/$i
sleep 0.0166
done
echo
echo "*Stopping dump..."
curl -X GET http://localhost:4560/controller/fileOutConnector/stop
echo
echo Done!
|
djey47/metrics.blackbox
|
test/file/dump.sh
|
Shell
|
gpl-2.0
| 617 |
# Copyright (C) 2010 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. ./test-utils.sh
aux prepare_vg 3
lvcreate -m 1 -l 1 -n mirror $vg
lvchange -a n $vg/mirror
disable_dev $dev1
not vgreduce --removemissing $vg
not lvchange -v -a y $vg/mirror
lvchange -v --partial -a y $vg/mirror
not lvchange -v --refresh $vg/mirror
lvchange -v --refresh --partial $vg/mirror
# also check that vgchange works
vgchange -a n --partial $vg
vgchange -a y --partial $vg
# check vgremove
vgremove -f $vg
|
SteamMOD/android_bootable_steam_device-mapper
|
test/t-activate-partial.sh
|
Shell
|
gpl-2.0
| 872 |
#!/bin/bash
#
# MacPorts, Apache 2, MySQL 5, PHP 5.3 and phpMyAdmin 3.5.4 installation script for Mac OS X
#
# Author: enekochan
# URL: http://tech.enekochan.com
#
# It is mandatory to have installed:
# - Apple Xcode Developer Tools
# - Apple Command Line Developer Tools
# Download them from http://connect.apple.com/ (Apple ID is needed)
# Once installed run this command to accept the EULA:
#
# $ xcodebuild -license
#
################################################################################
# Important file locations
################################################################################
# httpd.conf: /opt/local/apache2/conf/httpd.conf
# httpd-vhosts.conf: /opt/local/apache2/conf/extra/httpd-vhosts.conf
# htdocs folder: /opt/local/apache2/htdocs
# my.cnf: /opt/local/my.cnf
# php.ini: /opt/local/etc/php5/php.ini
# config.inc.php: /opt/local/apache2/htdocs/phpmyadmin/config.inc.php
################################################################################
#
# Ref: http://gillesfabio.com/blog/2010/12/17/getting-php-5-3-on-mac-os-x/
#
################################################################################
function readPrompt() {
while true; do
read -e -p "$1 (default $2)"": " result
case $result in
Y|y ) result="y"; break;;
N|n ) result="n"; break;;
"" ) result=`echo $2 | awk '{print substr($0,0,1)}'`; break;;
* ) echo "Please answer yes or no.";;
esac
done
}
# If you want to completely uninstall MacPorts and all installed ports
# use the "uninstall" parameter
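# A hypothetical invocation of that mode, using this script's own file name:
#
#   bash installMacPorts-Apache-MySQL-PHP-phpMyAdmin-OSX.sh uninstall
#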
if [ "$1" == "uninstall" ]; then
echo "Uninstalling MacPorts and all installed ports..."
sudo port -fp uninstall installed
sudo rm -rf \
/opt/local \
/Applications/DarwinPorts \
/Applications/MacPorts \
/Library/LaunchDaemons/org.macports.* \
/Library/Receipts/DarwinPorts*.pkg \
/Library/Receipts/MacPorts*.pkg \
/Library/StartupItems/DarwinPortsStartup \
/Library/Tcl/darwinports1.0 \
/Library/Tcl/macports1.0 \
~/.macports
exit
fi
readPrompt "Do you want Apache 2 and MySQL 5 to autorun on boot? " "y"
AUTORUN=$result
readPrompt "Do you want to secure MySQL 5? (MySQL password for root user will be changed in this interactive process) " "y"
SECURE=$result
readPrompt "Do you want to change Apache 2 proccess running user and group to your user and group? " "y"
CHANGE_USER=$result
readPrompt "Do you want to set Apache 2 ServerName to 127.0.0.1:80? " "y"
CHANGE_SERVER_NAME=$result
readPrompt "Do you want to activate virtual hosts in Apache 2? " "y"
ACTIVATE_VIRTUAL_HOSTS=$result
readPrompt "Do you want to create virtual hosts for localhost? " "y"
LOCALHOST_VIRTUAL_HOST=$result
# Download the MacPorts package for the current Mac OS X version
# Manual download in http://www.macports.org/install.php
VERSION=`sw_vers -productVersion`
VERSION=${VERSION:3:1}
if [ "$VERSION" == "6" ]; then
URL=https://distfiles.macports.org/MacPorts/MacPorts-2.2.1-10.6-SnowLeopard.pkg
elif [ "$VERSION" == "7" ]; then
URL=https://distfiles.macports.org/MacPorts/MacPorts-2.2.1-10.7-Lion.pkg
elif [ "$VERSION" == "8" ]; then
URL=https://distfiles.macports.org/MacPorts/MacPorts-2.2.1-10.8-MountainLion.pkg
elif [ "$VERSION" == "9" ]; then
URL=https://distfiles.macports.org/MacPorts/MacPorts-2.2.1-10.9-Mavericks.pkg
fi
if [ "$URL" == "" ]; then
echo "MacPort can only be installed automatically in Mac OS X 10.6, 10.7, 10.8 or 10.9"
exit
fi
curl -O $URL
FILE_NAME=`echo $URL | sed -e "s/\https:\/\/distfiles.macports.org\/MacPorts\///g"`
sudo installer -pkg $FILE_NAME -target /
# Update MacPorts package database
sudo port -d selfupdate
# Install Apache 2
sudo port install apache2
if [ $AUTORUN == "y" ]; then
# Make Apache 2 autorun on boot
# This creates the file /Library/LaunchDaemons/org.macports.apache2.plist
# and a /opt/local/bin/daemondo process to manage it.
sudo port load apache2
else
# Run the Apache 2 service
sudo /opt/local/etc/LaunchDaemons/org.macports.apache2/apache2.wrapper start
fi
# Install MySQL 5
sudo port install mysql5-server
# Configure the MySQL 5 database files and folders
sudo -u _mysql mysql_install_db5
sudo chown -R mysql:mysql /opt/local/var/db/mysql5/
sudo chown -R mysql:mysql /opt/local/var/run/mysql5/
sudo chown -R mysql:mysql /opt/local/var/log/mysql5/
# Remain compatible with other programs that may look for the socket file in its original location
sudo ln -s /tmp/mysql.sock /opt/local/var/run/mysql5/mysqld.sock
# Create a my.cnf file from the "small" template
# It can also be copied to /etc/my.cnf or /opt/local/var/db/mysql5/my.cnf (deprecated)
# This file should be in /opt/local/etc/mysql/my.cnf but /opt/local/share/mysql5/mysql/mysql.server
# MySQL daemon start/stop script looks for it (in this order) in /etc/my.cnf, /opt/local/my.cnf
# and /opt/local/var/db/mysql5/my.cnf. If you look the script it uses the $basedir and $datadir
# variables to search it.
sudo cp /opt/local/share/mysql5/mysql/my-small.cnf /opt/local/my.cnf
if [ $AUTORUN == "y" ]; then
# Make MySQL 5 autorun on boot
# This creates the file /Library/LaunchDaemons/org.macports.mysql5.plist
# and a /opt/local/bin/daemondo process to manage it.
sudo port load mysql5-server
else
# Run the MySQL 5 service
sudo /opt/local/etc/LaunchDaemons/org.macports.mysql5/mysql5.wrapper start
fi
# Secure MySQL 5 configuration
# The root password is blank by default
# This is an optional step that changes root password, deletes anonymous users,
# disables remote logins for root user and deletes the test database
# If you only want to change root password run this command:
# $ mysqladmin5 -u root -p password <your-password>
if [ $SECURE == "y" ]; then
/opt/local/bin/mysql_secure_installation5
fi
# Install PHP 5.3
sudo port install php5 +apache2 +pear
# You can add more php5 extensions. Run `port search php5-` to see the available extensions
sudo port install php5-mysql php5-sqlite php5-xdebug php5-mbstring php5-iconv php5-posix php5-apc php5-mcrypt
# Register PHP 5.3 with Apache 2
cd /opt/local/apache2/modules
sudo /opt/local/apache2/bin/apxs -a -e -n "php5" libphp5.so
# Create the php.ini file from the development template
cd /opt/local/etc/php5
sudo cp php.ini-development php.ini
# Configure the timezone and the socket of MySQL in /opt/local/etc/php5/php.ini
TIMEZONE=`systemsetup -gettimezone | awk '{ print $3 }'`
TIMEZONE=$(printf "%s\n" "$TIMEZONE" | sed 's/[][\.*^$/]/\\&/g')
sudo sed \
-e "s/;date.timezone =/date.timezone = \"$TIMEZONE\"/g" \
-e "s#pdo_mysql\.default_socket.*#pdo_mysql\.default_socket=`/opt/local/bin/mysql_config5 --socket`#" \
-e "s#mysql\.default_socket.*#mysql\.default_socket=`/opt/local/bin/mysql_config5 --socket`#" \
-e "s#mysqli\.default_socket.*#mysqli\.default_socket=`/opt/local/bin/mysql_config5 --socket`#" \
php.ini > /tmp/php.ini
sudo chown root:admin /tmp/php.ini
sudo mv /tmp/php.ini ./
# Include PHP 5.3 in Apache 2 configuration
sudo echo "" | sudo tee -a /opt/local/apache2/conf/httpd.conf
sudo echo "Include conf/extra/mod_php.conf" | sudo tee -a /opt/local/apache2/conf/httpd.conf
if [ $CHANGE_USER == "y" ]; then
# Change the user and group of the Apache 2 process to the current user
# By default it is www:www
# Note: command substitution does not happen inside single quotes, so the
# sed expressions must be double-quoted for `id` to be expanded.
sudo sed \
-e "s/User www/User $(id -un)/g" \
-e "s/Group www/Group $(id -gn)/g" \
/opt/local/apache2/conf/httpd.conf > /tmp/httpd.conf
sudo chown root:admin /tmp/httpd.conf
sudo mv /tmp/httpd.conf /opt/local/apache2/conf/httpd.conf
fi
if [ $CHANGE_SERVER_NAME == "y" ]; then
# This solves this warning:
# httpd: Could not reliably determine the server's fully qualified domain name, using enekochans-Mac-mini.local for ServerName
# Just fill ServerName option in httpd.conf with 127.0.0.1:80
sudo sed \
-e 's/#ServerName www.example.com:80/ServerName 127.0.0.1:80/g' \
/opt/local/apache2/conf/httpd.conf > /tmp/httpd.conf
sudo chown root:admin /tmp/httpd.conf
sudo mv /tmp/httpd.conf /opt/local/apache2/conf/httpd.conf
fi
if [ $ACTIVATE_VIRTUAL_HOSTS == "y" ]; then
sudo sed \
-e 's/#Include conf\/extra\/httpd-vhosts.conf/Include conf\/extra\/httpd-vhosts.conf/g' \
/opt/local/apache2/conf/httpd.conf > /tmp/httpd.conf
sudo chown root:admin /tmp/httpd.conf
sudo mv /tmp/httpd.conf /opt/local/apache2/conf/httpd.conf
fi
if [ $LOCALHOST_VIRTUAL_HOST == "y" ]; then
echo "" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo "<VirtualHost *:80>" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo " ServerAdmin webmaster@localhost" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo " DocumentRoot \"/opt/local/apache2/htdocs\"" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo " ServerName localhost" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo " ServerAlias localhost" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo " ErrorLog \"logs/localhost-error_log\"" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo " CustomLog \"logs/localhost-access_log\" common" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo " DirectoryIndex index.php index.html" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo "</VirtualHost>" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
echo "" | sudo tee -a /opt/local/apache2/conf/extra/httpd-vhosts.conf
fi
# Install phpMyAdmin in localhost (http://localhost/phpmyadmin)
cd /opt/local/apache2/htdocs
sudo curl --location -O http://sourceforge.net/projects/phpmyadmin/files/phpMyAdmin/3.5.4/phpMyAdmin-3.5.4-all-languages.tar.gz
tar xzvpf phpMyAdmin-3.5.4-all-languages.tar.gz
#sudo rm phpMyAdmin-3.5.4-all-languages.tar.gz
sudo mv phpMyAdmin-3.5.4-all-languages phpmyadmin
cd phpmyadmin
sudo cp config.sample.inc.php config.inc.php
echo "Enter MySQL's root password."
mysql5 -u root -p < examples/create_tables.sql
read -e -p "Enter a password for pma user in phpmyadmin database"": " result
PMA_PASSWORD=$result
echo "GRANT USAGE ON mysql.* TO 'pma'@'localhost' IDENTIFIED BY '$PMA_PASSWORD';" > /tmp/grant.sql
echo "GRANT SELECT (" >> /tmp/grant.sql
echo " Host, User, Select_priv, Insert_priv, Update_priv, Delete_priv," >> /tmp/grant.sql
echo " Create_priv, Drop_priv, Reload_priv, Shutdown_priv, Process_priv," >> /tmp/grant.sql
echo " File_priv, Grant_priv, References_priv, Index_priv, Alter_priv," >> /tmp/grant.sql
echo " Show_db_priv, Super_priv, Create_tmp_table_priv, Lock_tables_priv," >> /tmp/grant.sql
echo " Execute_priv, Repl_slave_priv, Repl_client_priv" >> /tmp/grant.sql
echo " ) ON mysql.user TO 'pma'@'localhost';" >> /tmp/grant.sql
echo "GRANT SELECT ON mysql.db TO 'pma'@'localhost';" >> /tmp/grant.sql
echo "GRANT SELECT ON mysql.host TO 'pma'@'localhost';" >> /tmp/grant.sql
echo "GRANT SELECT (Host, Db, User, Table_name, Table_priv, Column_priv)" >> /tmp/grant.sql
echo " ON mysql.tables_priv TO 'pma'@'localhost';" >> /tmp/grant.sql
echo "GRANT SELECT, INSERT, UPDATE, DELETE ON phpmyadmin.* TO 'pma'@'localhost';" >> /tmp/grant.sql
echo "FLUSH PRIVILEGES;" >> /tmp/grant.sql
echo "Enter MySQL's root password."
mysql5 -u root -p < /tmp/grant.sql
rm /tmp/grant.sql
# Fill the blowfish_secret password with random value,
# uncomment all lines with "$cfg['Servers'][$i]", change pma users password
# comment back the Swekey authentication configuration line
BLOWFISH1=$(printf "%s\n" "\$cfg['blowfish_secret']" | sed 's/[][\.*^$/]/\\&/g')
PASS=`env LC_CTYPE=C tr -dc "a-zA-Z0-9-_\$\?\{\}\=\^\+\(\)\@\%\|\*\[\]\~" < /dev/urandom | head -c 46`
BLOWFISH2=$(printf "%s\n" "\$cfg['blowfish_secret'] = '$PASS';" | sed 's/[][\.*^$/]/\\&/g')
TEXT1=$(printf "%s\n" "// \$cfg['Servers'][\$i]" | sed 's/[][\.*^$/]/\\&/g')
TEXT2=$(printf "%s\n" "\$cfg['Servers'][\$i]" | sed 's/[][\.*^$/]/\\&/g')
sudo sed \
-e "s/^$BLOWFISH1.*/$BLOWFISH2/g" \
-e "s/$TEXT1/$TEXT2/g" \
-e "s/pmapass/$PMA_PASSWORD/g" \
-e "/swekey-pma.conf/s/^/\/\/ /" \
/opt/local/apache2/htdocs/phpmyadmin/config.inc.php > /tmp/config.inc.php
sudo chown root:admin /tmp/config.inc.php
sudo mv /tmp/config.inc.php /opt/local/apache2/htdocs/phpmyadmin/config.inc.php
PMA_PASSWORD=""
PASS=""
BLOWFISH2=""
# Restart Apache 2
sudo /opt/local/apache2/bin/apachectl -k restart
|
almc/ar_tracker
|
scripts/installMacPorts-Apache-MySQL-PHP-phpMyAdmin-OSX.sh
|
Shell
|
gpl-2.0
| 12,372 |
#!/bin/sh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
op=editor.tcl
# Figure the location of the mptcl library
script_dir=$(cd $(dirname $0) ; pwd)
for target in . ../lib ../lib/mptcl ../mptcl
do
if [ -f $script_dir/$target/$op ] ; then
mptcl_lib="$(cd $script_dir/$target ; pwd)"
break
fi
done
if [ -z "$mptcl_lib" ] ; then
echo "Unable to determine mptcl_lib location" 1>&2
exit 1
fi
exec wish $mptcl_lib/$op
|
alejandroliu/dotfiles
|
libsrc/mptcl/edled.sh
|
Shell
|
gpl-2.0
| 1,053 |
#!/sbin/busybox sh
#
## Create the kernel data directory
mkdir /data/.agat
chmod 777 /data/.agat
## Enable "post-init" Logging do not enable if /sbin/init is already logging ...
mv /data/.agat/post-init.log /data/.agat/post-init.log.bak
busybox date >/data/.agat/post-init.log
exec >>/data/.agat/post-init.log 2>&1
ccxmlsum=`md5sum /res/customconfig/customconfig.xml | awk '{print $1}'`
if [ "a${ccxmlsum}" != "a`cat /data/.agat/.ccxmlsum`" ];
then
rm -f /data/.agat/*.profile
echo ${ccxmlsum} > /data/.agat/.ccxmlsum
fi
[ ! -f /data/.agat/default.profile ] && cp /res/customconfig/default.profile /data/.agat
[ ! -f /data/.agat/battery.profile ] && cp /res/customconfig/battery.profile /data/.agat
[ ! -f /data/.agat/performance.profile ] && cp /res/customconfig/performance.profile /data/.agat
. /res/customconfig/customconfig-helper
read_defaults
read_config
## cpu undervolting
echo "${cpu_undervolting}" > /sys/devices/system/cpu/cpu0/cpufreq/vdd_levels
## change cpu step counts
case "${cpustepcount}" in
5)
echo 1200 1000 800 500 200 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
;;
6)
echo 1400 1200 1000 800 500 200 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
;;
7)
echo 1500 1400 1200 1000 800 500 200 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
;;
8)
echo 1600 1400 1200 1000 800 500 200 100 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
;;
9)
echo 1600 1500 1400 1200 1000 800 500 200 100 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
;;
18)
echo 1600 1500 1400 1300 1200 1100 1000 900 800 700 600 500 400 300 200 100 50 25 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
;;
esac;
## Android Logger
if [ "${logger}" == "on" ];then
insmod /lib/modules/logger.ko
else
## disable debugging on some modules
# rm -rf /dev/log
echo 0 > /sys/module/ump/parameters/ump_debug_level
echo 0 > /sys/module/mali/parameters/mali_debug_level
echo 0 > /sys/module/kernel/parameters/initcall_debug
echo 0 > /sys/module/lowmemorykiller/parameters/debug_level
echo 0 > /sys/module/earlysuspend/parameters/debug_mask
echo 0 > /sys/module/alarm/parameters/debug_mask
echo 0 > /sys/module/alarm_dev/parameters/debug_mask
echo 0 > /sys/module/binder/parameters/debug_mask
echo 0 > /sys/module/xt_qtaguid/parameters/debug_mask
fi
#enable kmem interface for everyone by GM.
echo 0 > /proc/sys/kernel/kptr_restrict
# Set color mode to user mode
echo "1" > /sys/devices/platform/samsung-pd.2/mdnie/mdnie/mdnie/user_mode
## for ntfs automounting
# insmod /lib/modules/fuse.ko
mkdir /mnt/ntfs
mount -t tmpfs tmpfs /mnt/ntfs
chmod 777 /mnt/ntfs
/sbin/busybox sh /sbin/ext/properties.sh
/sbin/busybox sh /sbin/ext/install.sh
## run this because user may have chosen not to install root at boot
## but he may need it later and install it using ExTweaks
/sbin/busybox sh /sbin/ext/su-helper.sh
##### Early-init phase tweaks #####
/sbin/busybox sh /sbin/ext/tweaks.sh
/sbin/busybox mount -t rootfs -o remount,ro rootfs
##### EFS Backup #####
(
# make sure that sdcard is mounted
sleep 30
/sbin/busybox sh /sbin/ext/efs-backup.sh
) &
# apply ExTweaks defaults
/res/uci.sh apply
/res/uci.sh soundgasm_hp ${soundgasm_hp}
/res/customconfig/actions/usb-mode ${usb_mode}
##### init scripts #####
(
/sbin/busybox sh /sbin/ext/run-init-scripts.sh
)&
|
agat63/AGAT_FI27_kernel
|
initramfs/sbin/ext/post-init.sh
|
Shell
|
gpl-2.0
| 3,468 |
#!/bin/bash
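# Example usage (the file name comes from this script's repository path):
#   bash condition_if_elif_else_fi.sh 5     -> prints "5 is positive"
#   bash condition_if_elif_else_fi.sh -3    -> prints "-3 is negative"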
if [ $1 -gt 0 ]
then
echo "$1 is positive"
elif [ $1 -lt 0 ]
then
echo "$1 is negative"
elif [ $1 -eq 0 ]
then
echo "$1 is zero"
else
echo "$1 is not a number"
fi
|
Furzoom/demo-C
|
src/shell/condition_if_elif_else_fi.sh
|
Shell
|
gpl-2.0
| 191 |
#!/bin/sh
#
# Copyright 2005-2008 Sun Microsystems, Inc. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
# CA 95054 USA or visit www.sun.com if you need additional information or
# have any questions.
#
# @test
# @bug 6265810 6705893
# @build CheckEngine
# @run shell jrunscript-eTest.sh
# @summary Test that output of 'jrunscript -e' matches the dash-e.out file
. ${TESTSRC-.}/common.sh
setup
${JAVA} -cp ${TESTCLASSES} CheckEngine
if [ $? -eq 2 ]; then
echo "No js engine found and engine not required; test vacuously passes."
exit 0
fi
rm -f jrunscript-eTest.out 2>/dev/null
${JRUNSCRIPT} -e "println('hello')" > jrunscript-eTest.out 2>&1
diff jrunscript-eTest.out ${TESTSRC}/dash-e.out
if [ $? != 0 ]
then
echo "Output of jrunscript -e differ from expected output. Failed."
rm -f jrunscript-eTest.out 2>/dev/null
exit 1
fi
# -e option with JavaScript explicitly chosen as the language
rm -f jrunscript-eTest.out 2>/dev/null
${JRUNSCRIPT} -l js -e "println('hello')" > jrunscript-eTest.out 2>&1
diff jrunscript-eTest.out ${TESTSRC}/dash-e.out
if [ $? != 0 ]
then
echo "Output of jrunscript -e differ from expected output. Failed."
rm -f jrunscript-eTest.out 2>/dev/null
exit 1
fi
rm -f jrunscript-eTest.out
echo "Passed"
exit 0
|
TheTypoMaster/Scaper
|
openjdk/jdk/test/sun/tools/jrunscript/jrunscript-eTest.sh
|
Shell
|
gpl-2.0
| 2,099 |
#!/bin/sh
docker build --tag=build-vkquake docker
docker run --rm --privileged -e VERSION=`./get-version.sh` -v ${PWD}/..:/usr/src/vkQuake build-vkquake /usr/src/vkQuake/AppImage/run-in-docker.sh
|
Novum/vkQuake
|
AppImage/build-appimage.sh
|
Shell
|
gpl-2.0
| 195 |
#!/bin/bash
#set -x
# if no arguments given, start with interactive terminal
if test $# -lt 1; then
args="-t -i flixr/pprz-dev"
else
# Use this script with derived images, and pass your 'docker run' args
args="$@"
fi
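# Example: run a derived image interactively with a serial device passed through
# (the device path below is only an illustration):
#   ./run.sh -i -t --device=/dev/ttyUSB0 flixr/pprz-dev bash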
# check if running on Linux or OSX
UNAME=$(uname -s)
############################################################
# share this paparazzi directory with the container
############################################################
# set PAPARAZZI_SRC to this tree
# on OSX: readlink doesn't have the -f or -m options, try using pwd
if [ $UNAME == "Linux" ]; then
SCRIPT=$(readlink -f $0)
SCRIPT_DIR=$(dirname $(readlink -f $0))
PAPARAZZI_SRC=$(readlink -m $SCRIPT_DIR/..)
else
PAPARAZZI_SRC=$(dirname $(pwd))
fi
# PAPARAZZI_HOME inside the container
PPRZ_HOME_CONTAINER=/home/pprz/paparazzi
# share the paparazzi directory and set it as working directory
SHARE_PAPARAZZI_HOME_OPTS="--volume=$PAPARAZZI_SRC:$PPRZ_HOME_CONTAINER \
--env=PAPARAZZI_HOME=$PPRZ_HOME_CONTAINER \
--env=PAPARAZZI_SRC=$PPRZ_HOME_CONTAINER \
-w $PPRZ_HOME_CONTAINER"
############################################################
# grant access to X-Server
############################################################
if [ $UNAME == "Linux" ]; then
XSOCK=/tmp/.X11-unix
XAUTH=/tmp/.docker.xauth
touch $XAUTH
xauth nlist $DISPLAY | sed -e 's/^..../ffff/' | xauth -f $XAUTH nmerge -
# options to grant access to the Xserver
X_WINDOW_OPTS="--volume=$XSOCK:$XSOCK --volume=$XAUTH:$XAUTH --env=XAUTHORITY=${XAUTH} --env=DISPLAY=${DISPLAY}"
fi
# using xauth with docker on OSX doesn't work, so we use socat:
# see https://github.com/docker/docker/issues/8710
if [ $UNAME == "Darwin" ]; then
X_WINDOW_OPTS="--env=DISPLAY=192.168.99.1:0"
TCPPROXY="socat TCP-LISTEN:6000,reuseaddr,fork UNIX-CLIENT:\"$DISPLAY\""
fi
############################################################
# Audio
############################################################
if [ $UNAME == "Linux" ]; then
# pass audio to pulseaudio server on host
USER_UID=$(id -u)
PULSE_AUDIO_OPTS="--volume=/run/user/${USER_UID}/pulse:/run/pulse"
fi
############################################################
# USB
############################################################
# give the container access to USB; WARNING: this runs it as a privileged container!
# used when the PRIVILEGED_USB variable is non-empty
if [ -n "$PRIVILEGED_USB" ]; then
echo "WARNING: running as privileged container to enable complete USB access!"
echo "Better pass devices explicitly: ./run.sh -i -t --device=/dev/ttyUSB0 flixr/pprz-dev bash"
USB_OPTS="--privileged --volume=/dev/bus/usb:/dev/bus/usb"
fi
# try to detect which USB devices to pass to the container automatically
# set DISABLE_USB=1 to turn it off
if [ -z "$DISABLE_USB" ]; then
# find on OSX doesn't have the -printf option... so use exec echo instead
USB_OPTS=$(find /dev -maxdepth 1 \( -name "ttyACM?" -or -name "ttyUSB?" \) -exec echo -n "--device={} " \;)
if [ -n "$USB_OPTS" ]; then
echo Passing auto-detected USB devices: $USB_OPTS
fi
fi
############################################################
# Run it!
############################################################
if [ $UNAME == "Darwin" ]; then
# start socat in background to forward the X socket via TCP
$TCPPROXY &
fi
# run the docker container with all the fancy options
docker run \
${X_WINDOW_OPTS} \
${PULSE_AUDIO_OPTS} \
${USB_OPTS} \
${SHARE_PAPARAZZI_HOME_OPTS} \
--rm $args
############################################################
# cleanup after exiting from docker container
############################################################
# cleanup XAUTHORITY file again
rm -f $XAUTH
# on OSX kill background socat process again
if [ $UNAME == "Darwin" ]; then
pkill -f "$TCPPROXY"
fi
|
LodewijkSikkel/paparazzi
|
docker/run.sh
|
Shell
|
gpl-2.0
| 3,900 |
#!/bin/bash
# =====================================================================================================
# Copyright (C) steady.sh v1.2 2016 iicc (@iicc1)
# =====================================================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# this program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
# =======================================================================================================
# It depends on Tmux https://github.com/tmux/tmux which is BSD-licensed
# and Screen https://www.gnu.org/software/screen GNU-licensed.
# =======================================================================================================
# This script is intended to control the state of a telegram-cli telegram bot running in background.
# The idea is to get the bot fully operative all the time without any supervision by the user.
# It should be able to recover the telegram bot whenever telegram-cli crashes,
# freezes or otherwise stops responding.
# This script works by sampling the context-switch counters of the telegram-cli
# process (via /proc) every $RELOADTIME seconds, so it can detect any kind of
# kernel-level interruption of the process and reload the bot.
#
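# A minimal sketch of the check performed below (assuming the telegram-cli PID
# is stored in $CLIPID, as the main loop does):
#
#   grep voluntary_ctxt_switches /proc/$CLIPID/task/$CLIPID/status
#
# prints both the voluntary and nonvoluntary context-switch counters; if neither
# counter has changed between two samples taken $RELOADTIME seconds apart, the
# process is considered frozen and the bot is restarted.
#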
#--------------------------------------------------
#-- ▒▒▒▒▒▒▒▒▒ --
#-- ▒▒█████▒▒ --
#-- ▒█▒▒▒▒▒█▒ --
#-- ▒█▒▒▒▒▒█▒ --
#-- ▒█▒█▒▒▒█▒ --
#-- ▒▒██▒▒█▒▒ --
#-- ▒▒▒▒▒▒▒▒▒ --
#-- ▒███████▒ --
#-- ▒▒▒▒█▒▒█▒ --
#-- ▒▒▒▒█▒▒█▒ --
#-- ▒▒▒▒█▒▒█▒ --
#-- ▒▒▒▒▒██▒▒ --
#-- ▒▒▒▒▒▒▒▒▒ --
#-- ▒███████▒ --
#-- ▒▒▒▒▒▒█▒▒ --
#-- ▒▒▒▒██▒▒▒ --
#-- ▒▒▒▒▒▒█▒▒ --
#-- ▒███████▒ --
#-- ▒▒▒▒▒▒▒▒▒ --
#-- ▒▒█████▒▒ --
#-- ▒█▒▒▒▒▒█▒ --
#-- ▒█▒▒▒▒▒█▒ --
#-- ▒█▒▒▒▒▒█▒ --
#-- ▒▒█████▒▒ --
#-- ▒▒▒▒▒▒▒▒▒ --
#-- ▒███████▒ --
#-- ▒█▒▒▒▒▒█▒ --
#-- ▒█▒▒▒▒▒█▒ --
#-- ▒█▒▒▒▒▒█▒ --
#-- ▒▒█████▒▒ --
#-- ▒▒▒▒▒▒▒▒▒ --
#-- --
#--------------------------------------------------
#-- --
#-- Developers: @CRUEL --
#-- --
#-- --
#--------------------------------------------------
# Some script variables
OK=0
BAD=0
NONVOLUNTARY=1
NONVOLUNTARYCHECK=0
VOLUNTARY=1
VOLUNTARYCHECK=0
I=1
BOT=Spam # You can put here other bots. Also you can change it to run more than one bot in the same server.
RELOADTIME=10 # Time between checking cpu calls of the cli process. Set the value high if your bot does not receive lots of messages.
LAUNCHER=start.sh
function tmux_mode {
sleep 0.5
clear
# Space invaders thanks to github.com/windelicato
f=3 b=4
for j in f b; do
for i in {0..7}; do
printf -v $j$i %b "\e[${!j}${i}m"
done
done
bld=$'\e[1m'
rst=$'\e[0m'
cat << EOF
$f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$f1█▀███████▀█ $f2▀▀███▀▀███▀▀ $f3▀█▀██▀█▀ $f4█▀███████▀█ $f5▀▀███▀▀███▀▀ $f6▀█▀██▀█▀$rst
$f1▀ ▀▄▄ ▄▄▀ ▀ $f2 ▀█▄ ▀▀ ▄█▀ $f3▀▄ ▄▀ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5 ▀█▄ ▀▀ ▄█▀ $f6▀▄ ▄▀$rst
EOF
echo -e " \e[100m Steady script \e[00;37;40m"
echo -e "\033[38;5;208m ___ _____ ___ ___ \033[0;00m"
echo -e "\033[38;5;208m / /\ / /::\ ___ / /\ /__/\ \033[0;00m"
echo -e "\033[38;5;208m / /::\ / /:/\:\ /__/\ / /::\ \ \:\ \033[0;00m"
echo -e "\033[38;5;208m / /:/\:\ / /:/ \:\ \ \:\ / /:/\:\ \ \:\ \033[0;00m"
echo -e "\033[38;5;208m / /:/~/::\ /__/:/ \__\:| \ \:\ / /:/~/::\ _____\__\:\ \033[0;00m"
echo -e "\033[38;5;208m /__/:/ /:/\:\ \ \:\ / /:/ ___ \__\:\ /__/:/ /:/\:\ /__/::::::::\ \033[0;00m"
echo -e "\033[38;5;208m \ \:\/:/__\/ \ \:\ /:/ /__/\ : |:| \ \:\/:/__\/ \ \:\~~\~~\/ \033[0;00m"
echo -e "\033[38;5;208m \ \::/ \ \:\/:/ \ \:\| |:| \ \::/ \ \:\ ~~~ \033[0;00m"
echo -e "\033[38;5;208m \ \:\ \ \::/ \ \:\__|:| \ \:\ \ \:\ \033[0;00m"
echo -e "\033[38;5;208m \ \:\ \__\/ \__\::::/ \ \:\ \ \:\ \033[0;00m"
echo -e "\033[38;5;208m \__\/ ~~~~ \__\/ \__\/ \033[0;00m"
echo -e "\033[38;5;208m \e[01;34m https://github.com/janlou/AdvanSource \e[00;37;40m"
echo ""
cat << EOF
$bld$f1▄ ▀▄ ▄▀ ▄ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4▄ ▀▄ ▄▀ ▄ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$bld$f1█▄█▀███▀█▄█ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4█▄█▀███▀█▄█ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$bld$f1▀█████████▀ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4▀█████████▀ $f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀$rst
$bld$f1 ▄▀ ▀▄ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4 ▄▀ ▀▄ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄$rst
EOF
sleep 1.2
# Checking if the bot folder is in HOME
echo -e "$bld$f4 CHECKING INSTALLED BOT...$rst"
sleep 0.5
ls ../ | grep $BOT > /dev/null
if [ $? != 0 ]; then
echo -e "$f1 ERROR: BOT: $BOT NOT FOUND IN YOUR HOME DIRECTORY$rst"
sleep 4
exit 1
fi
echo -e "$f2 $BOT FOUND IN YOUR HOME DIRECTORY$rst"
sleep 0.5
echo ""
echo -e "\033[38;5;208m 𝓟𝓸𝔀𝒆𝓻𝒆𝓭 𝓫𝔂: \033[0;00m"
echo -e "\033[38;5;208m Advan Team \033[0;00m"
echo -e "\033[38;5;208m @GPMod :) @cruel_channel \033[0;00m"
echo -e "\033[38;5;208m @GPMod :) @cruel_channel \033[0;00m"
sleep 1.5
echo -e "$bld$f4 CHECKING PROCESSES...$rst"
sleep 0.7
# Looks for the number of screen/telegram-cli processes
CLINUM=`ps -e | grep -c telegram-cli`
echo "$f2 RUNNING $CLINUM TELEGRAM-CLI PROCESS$rst"
sleep 0.9
# =====Setup ends===== #
# Opening new tmux in a daemon
echo -e "$bld$f4 ATTACHING TMUX AS DAEMON...$rst"
# It is recommended to clear cli status always before starting the bot
rm ../.telegram-cli/state > /dev/null
# Nested TMUX sessions trick
TMUX= tmux new-session -d -s $BOT "./$LAUNCHER"
sleep 1.3
CLIPID=`ps -e | grep telegram-cli | head -1 | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
echo -e "$f2 NEW TELEGRAM-CLI PROCESS: $CLIPID$rst"
echo ""
echo ""
# Locating telegram-cli status
cat /proc/$CLIPID/task/$CLIPID/status > STATUS
NONVOLUNTARY=`grep nonvoluntary STATUS | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
sleep 3
# :::::::::::::::::::::::::
# ::::::: MAIN LOOP :::::::
# :::::::::::::::::::::::::
while true; do
echo -e "$f2 TIMES CHECKED AND RUNNING:$f5 $OK $rst"
echo -e "$f2 TIMES FAILED AND RECOVERED:$f5 $BAD $rst"
echo ""
cat /proc/$CLIPID/task/$CLIPID/status > CHECK
if [ $? != 0 ]; then
I=$(( $I + 1 ))
if [ $I -ge 3 ]; then
kill $CLIPID
tmux kill-session -t $BOT
rm ../.telegram-cli/state > /dev/null
NONVOLUNTARY=0
NONVOLUNTARYCHECK=0
VOLUNTARY=0
VOLUNTARYCHECK=0
fi
else
I=1
fi
VOLUNTARYCHECK=`grep voluntary CHECK | head -1 | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
NONVOLUNTARYCHECK=`grep nonvoluntary CHECK | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
if [ $NONVOLUNTARY != $NONVOLUNTARYCHECK ] || [ $VOLUNTARY != $VOLUNTARYCHECK ]; then
echo -e "$f5 BOT RUNNING!$rst"
OK=$(( $OK + 1 ))
else
echo -e "$f5 BOT NOT RUNING, TRYING TO RELOAD IT...$rst"
BAD=$(( $BAD + 1 ))
sleep 1
rm ../.telegram-cli/state > /dev/null
kill $CLIPID
tmux kill-session -t $BOT
TMUX= tmux new-session -d -s $BOT "./$LAUNCHER"
sleep 1
CLIPID=`ps -e | grep telegram-cli | head -1 | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
if [ -z "${CLIPID}" ]; then
echo -e "$f1 ERROR: TELEGRAM-CLI PROCESS NOT RUNNING$rst"
echo -e "$f1 FAILED TO RECOVER BOT$rst"
sleep 3
exit 1
fi
fi
VOLUNTARY=`echo $VOLUNTARYCHECK`
NONVOLUNTARY=`echo $NONVOLUNTARYCHECK`
sleep $RELOADTIME
rm CHECK
done
}
function screen_mode {
clear
sleep 0.5
# Space invaders thanks to github.com/windelicato
f=3 b=4
for j in f b; do
for i in {0..7}; do
printf -v $j$i %b "\e[${!j}${i}m"
done
done
bld=$'\e[1m'
rst=$'\e[0m'
cat << EOF
$f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$f1█▀███████▀█ $f2▀▀███▀▀███▀▀ $f3▀█▀██▀█▀ $f4█▀███████▀█ $f5▀▀███▀▀███▀▀ $f6▀█▀██▀█▀$rst
$f1▀ ▀▄▄ ▄▄▀ ▀ $f2 ▀█▄ ▀▀ ▄█▀ $f3▀▄ ▄▀ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5 ▀█▄ ▀▀ ▄█▀ $f6▀▄ ▄▀$rst
EOF
echo -e " \e[100m Steady script \e[00;37;40m"
echo -e "\033[38;5;208m ___ _____ ___ ___ \033[0;00m"
echo -e "\033[38;5;208m / /\ / /::\ ___ / /\ /__/\ \033[0;00m"
echo -e "\033[38;5;208m / /::\ / /:/\:\ /__/\ / /::\ \ \:\ \033[0;00m"
echo -e "\033[38;5;208m / /:/\:\ / /:/ \:\ \ \:\ / /:/\:\ \ \:\ \033[0;00m"
echo -e "\033[38;5;208m / /:/~/::\ /__/:/ \__\:| \ \:\ / /:/~/::\ _____\__\:\ \033[0;00m"
echo -e "\033[38;5;208m /__/:/ /:/\:\ \ \:\ / /:/ ___ \__\:\ /__/:/ /:/\:\ /__/::::::::\ \033[0;00m"
echo -e "\033[38;5;208m \ \:\/:/__\/ \ \:\ /:/ /__/\ : |:| \ \:\/:/__\/ \ \:\~~\~~\/ \033[0;00m"
echo -e "\033[38;5;208m \ \::/ \ \:\/:/ \ \:\| |:| \ \::/ \ \:\ ~~~ \033[0;00m"
echo -e "\033[38;5;208m \ \:\ \ \::/ \ \:\__|:| \ \:\ \ \:\ \033[0;00m"
echo -e "\033[38;5;208m \ \:\ \__\/ \__\::::/ \ \:\ \ \:\ \033[0;00m"
echo -e "\033[38;5;208m \__\/ ~~~~ \__\/ \__\/ \033[0;00m"
echo -e "\033[38;5;208m \e[01;34m https://github.com/janlou/AdvanSource \e[00;37;40m"
echo ""
cat << EOF
$bld$f1▄ ▀▄ ▄▀ ▄ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4▄ ▀▄ ▄▀ ▄ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$bld$f1█▄█▀███▀█▄█ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4█▄█▀███▀█▄█ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$bld$f1▀█████████▀ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4▀█████████▀ $f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀$rst
$bld$f1 ▄▀ ▀▄ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4 ▄▀ ▀▄ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄$rst
EOF
sleep 1.3
# Checking if the bot folder is in HOME
echo -e "$bld$f4 CHECKING INSTALLED BOT...$rst"
sleep 0.5
ls ../ | grep $BOT > /dev/null
if [ $? != 0 ]; then
echo -e "$f1 ERROR: BOT: $BOT NOT FOUND IN YOUR HOME DIRECTORY$rst"
sleep 4
exit 1
fi
echo -e "$f2 $BOT FOUND IN YOUR HOME DIRECTORY$rst"
sleep 0.5
echo ""
echo -e "\033[38;5;208m 𝓟𝓸𝔀𝒆𝓻𝒆𝓭 𝓫𝔂: \033[0;00m"
echo -e "\033[38;5;208m Advan Team \033[0;00m"
echo -e "\033[38;5;208m @GPMod :) @cruel_channel \033[0;00m"
echo -e "\033[38;5;208m @GPMod :) @cruel_channel \033[0;00m"
# Starting preliminar setup
sleep 1.5
echo -e "$bld$f4 CHECKING PROCESSES...$rst"
sleep 0.7
# Looks for the number of screen/telegram-cli processes
SCREENNUM=`ps -e | grep -c screen`
CLINUM=`ps -e | grep -c telegram-cli`
if [ $SCREENNUM -ge 3 ]; then
echo -e "$f1 ERROR: MORE THAN 2 PROCESS OF SCREEN RUNNING.$rst"
echo -e "$f1 THESE PROCESSES HAVE BE KILLED. THEN RESTART THE SCRIPT$rst"
echo -e '$f1 RUN: "killall screen" $rst'
if [ $CLINUM -ge 2 ]; then
echo -e "$f1 ERROR: MORE THAN 1 PROCESS OF TELEGRAM-CLI RUNNING.$rst"
echo -e "$f1 THESE PROCESSES WILL BE KILLED. THEN RESTART THE SCRIPT$rst"
echo -e "$f1 RUN: killall telegram-cli $rst"
fi
sleep 4
exit 1
fi
echo "$f2 SCREEN NUMBER AND CLI NUMBER UNDER THE SUPPORTED LIMIT"
sleep 0.7
echo "$f2 RUNNING $SCREENNUM SCREEN PROCESS$rst"
echo "$f2 RUNNING $CLINUM TELEGRAM-CLI PROCESS$rst"
sleep 0.9
# Getting screen pid's
ps -e | grep screen | sed 's/^[[:space:]]*//' | cut -f 1 -d" " | while read -r line ; do
sleep 0.5
echo -e "$f2 SCREEN NUMBER $I PID: $line$rst"
if [ $I -eq 1 ]; then
echo $line > SC1
else
echo $line > SC2
fi
I=$(( $I + 1 ))
done
# I had some weird errors, so I had to do this silly fix:
SCREENPID1=`cat SC1`
SCREENPID2=`cat SC2`
rm SC1 SC2 >/dev/null
sleep 0.7
CLIPID=`ps -e | grep telegram-cli | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
if [ $CLINUM -eq 1 ]; then
echo -e "$f2 RUNNING ONE PROCESS OF TELEGRAM-CLI: $CLIPID1$rst"
echo -e "$bld$f4 KILLING TELEGRAM-CLI PROCESS. NOT NEEDED NOW$rst"
kill $CLIPID1
else
echo -e "$f2 RUNNING ZERO PROCESS OF TELEGRAM-CLI$rst"
fi
sleep 0.7
CLINUM=`ps -e | grep -c telegram-cli`
if [ $CLINUM -eq 1 ]; then
echo -e "$f1 ERROR: TELEGRAM-CLI PID COULDN'T BE KILLED. IGNORE.$rst"
fi
sleep 1
# =====Setup ends===== #
# Opening new screen in a daemon
echo -e "$bld$f4 ATTACHING SCREEN AS DAEMON...$rst"
# Better to clear cli status before
rm ../.telegram-cli/state > /dev/null
screen -d -m bash $LAUNCHER
sleep 1.3
SCREENNUM=`ps -e | grep -c screen`
if [ $SCREENNUM != 3 ]; then
echo -e "$f1 ERROR: SCREEN RUNNING: $SCREENNUM \n SCREEN ESPECTED: 3$rst"
exit 1
fi
# Getting screen info
sleep 0.7
echo -e "$bld$f4 RELOADING SCREEN INFO...$rst"
sleep 1
echo -e "$f2 NUMBER OF SCREEN ATTACHED: $SCREENNUM$rst"
echo -e "$f2 SECONDARY SCREEN: $SCREENPID1 AND $SCREENPID2$rst"
SCREEN=`ps -e | grep -v $SCREENPID1 | grep -v $SCREENPID2 | grep screen | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
sleep 0.5
echo -e "$f2 PRIMARY SCREEN: $SCREEN$rst"
sleep 0.7
echo -e "$bld$f4 RELOADING TELEGRAM-CLI INFO...$rst"
sleep 0.7
# Getting new telegram-cli PID
CLIPID=`ps -e | grep telegram-cli | sed 's/^[[:space:]]*//' |cut -f 1 -d" "`
echo -e "$f2 NEW TELEGRAM-CLI PID: $CLIPID$rst"
if [ -z "${CLIPID}" ]; then
echo -e "$f1 ERROR: TELEGRAM-CLI PROCESS NOT RUNNING$rst"
sleep 3
exit 1
fi
# Locating telegram-cli status
cat /proc/$CLIPID/task/$CLIPID/status > STATUS
NONVOLUNTARY=`grep nonvoluntary STATUS | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
sleep 5
# :::::::::::::::::::::::::
# ::::::: MAIN LOOP :::::::
# :::::::::::::::::::::::::
while true; do
echo -e "$f2 TIMES CHECKED AND RUNNING:$f5 $OK $rst"
echo -e "$f2 TIMES FAILED AND RECOVERED:$f5 $BAD $rst"
echo ""
cat /proc/$CLIPID/task/$CLIPID/status > CHECK
VOLUNTARYCHECK=`grep voluntary CHECK | head -1 | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
NONVOLUNTARYCHECK=`grep nonvoluntary CHECK | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
#echo -e "NONVOLUNTARYCHECK CTXT SWITCHES: $NONVOLUNTARYCHECK"
#echo -e "NONVOLUNTARY CTXT SWITCHES: $NONVOLUNTARY"
if [ $NONVOLUNTARY != $NONVOLUNTARYCHECK ] || [ $VOLUNTARY != $VOLUNTARYCHECK ]; then
echo -e "$f5 BOT RUNNING!$rst"
OK=$(( $OK + 1 ))
else
echo -e "$f5 BOT NOT RUNING, TRYING TO RELOAD IT...$rst"
BAD=$(( $BAD + 1 ))
sleep 1
rm ../.telegram-cli/state > /dev/null
kill $CLIPID
kill $SCREEN
screen -d -m bash $LAUNCHER
sleep 1
CLIPID=`ps -e | grep telegram-cli | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
if [ -z "${CLIPID}" ]; then
echo -e "$f1 ERROR: TELEGRAM-CLI PROCESS NOT RUNNING$rst"
echo -e "$f1 FAILED TO RECOVER BOT$rst"
sleep 1
fi
SCREENNUM=`ps -e | grep -c screen`
if [ $SCREENNUM != 3 ]; then
echo -e "$f1 ERROR: SCREEN RUNNING: $SCREENNUM \n SCREEN ESPECTED: 3$rst"
echo -e "$f1 FAILED TO RECOVER BOT$rst"
exit 1
fi
SCREEN=`ps -e | grep -v $SCREENPID1 | grep -v $SCREENPID2 | grep screen | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
echo -e "$f5 BOT HAS BEEN SUCCESFULLY RELOADED!$rst"
echo -e "$f2 TELEGRAM-CLI NEW PID: $CLIPID$rst"
echo -e "$f2 SCREEN NEW PID: $SCREEN$rst"
sleep 3
fi
VOLUNTARY=`echo $VOLUNTARYCHECK`
NONVOLUNTARY=`echo $NONVOLUNTARYCHECK`
sleep $RELOADTIME
rm CHECK
done
}
function tmux_detached {
clear
TMUX= tmux new-session -d -s script_detach "bash steady.sh -t"
echo -e "\e[1m"
echo -e ""
echo "Bot running in the backgroud with TMUX"
echo ""
echo -e "\e[0m"
sleep 3
tmux kill-session -t script
exit 1
}
function screen_detached {
clear
screen -d -m bash $LAUNCHER
echo -e "\e[1m"
echo -e ""
echo "Bot running in the backgroud with SCREEN"
echo ""
echo -e "\e[0m"
sleep 3
quit
exit 1
}
if [ $# -eq 0 ]
then
echo -e "\e[1m"
echo -e ""
echo "Missing options!"
echo "Run: bash steady.sh -h for help!"
echo ""
echo -e "\e[0m"
sleep 1
exit 1
fi
while getopts ":tsTSih" opt; do
case $opt in
t)
echo -e "\e[1m"
echo -e ""
echo "TMUX multiplexer option has been triggered." >&2
echo "Starting script..."
sleep 1.5
echo -e "\e[0m"
tmux_mode
exit 1
;;
s)
echo -e "\e[1m"
echo -e ""
echo "SCREEN multiplexer option has been triggered." >&2
echo "Starting script..."
sleep 1.5
echo -e "\e[0m"
screen_mode
exit 1
;;
T)
echo -e "\e[1m"
echo -e ""
echo "TMUX multiplexer option has been triggered." >&2
echo "Starting script..."
sleep 1.5
echo -e "\e[0m"
tmux_detached
exit 1
;;
S)
echo -e "\e[1m"
echo -e ""
echo "SCREEN multiplexer option has been triggered." >&2
echo "Starting script..."
sleep 1.5
echo -e "\e[0m"
screen_detached
exit 1
;;
i)
echo -e "\e[1m"
echo -e ""
echo "steady.sh bash script v1 CRUEL 2016 GPMOD" >&2
echo ""
echo -e "\e[0m"
echo -e "\033[38;5;208m 𝓟𝓸𝔀𝒆𝓻𝒆𝓭 𝓫𝔂: \033[0;00m"
echo -e "\033[38;5;208m Advan Team \033[0;00m"
echo -e "\033[38;5;208m @GPMod :) @cruel_channel \033[0;00m"
echo -e "\033[38;5;208m @GPMod :) @cruel_channel \033[0;00m"
echo ""
exit 1
;;
h)
echo -e "\e[1m"
echo -e ""
echo "Usage:"
echo -e ""
echo "steady.sh -t"
echo "steady.sh -s"
echo "steady.sh -T"
echo "steady.sh -S"
echo "steady.sh -h"
echo "steady.sh -i"
echo ""
echo "Options:"
echo ""
echo " -t select TMUX terminal multiplexer"
echo " -s select SCREEN terminal multiplexer"
echo " -T select TMUX and detach session after start"
echo " -S select SCREEN and detach session after start"
echo " -h script options help page"
echo " -i information about the script"
echo -e "\e[0m"
exit 1
;;
\?)
echo -e "\e[1m"
echo -e ""
echo "Invalid option: -$OPTARG" >&2
echo "Run bash $0 -h for help"
echo -e "\e[0m"
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
|
Tele-Sped/Spam
|
steady.sh
|
Shell
|
gpl-2.0
| 22,252 |
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A collection of shell function definitions used by various build scripts
# in the Android NDK (Native Development Kit)
#
# Get current script name into PROGNAME
PROGNAME=`basename $0`
# Find the Android NDK root, assuming we are invoked from a script
# within its directory structure.
#
# $1: Variable name that will receive the path
# $2: Path of invoking script
find_ndk_root ()
{
# Try to auto-detect the NDK root by walking up the directory
# path to the current script.
local PROGDIR="`dirname \"$2\"`"
while [ -n "1" ] ; do
if [ -d "$PROGDIR/build/core" ] ; then
break
fi
if [ -z "$PROGDIR" -o "$PROGDIR" = '/' ] ; then
return 1
fi
PROGDIR="`cd \"$PROGDIR/..\" && pwd`"
done
eval $1="$PROGDIR"
}
# Put location of Android NDK into ANDROID_NDK_ROOT and
# perform a tiny amount of sanity check
#
if [ -z "$ANDROID_NDK_ROOT" ] ; then
find_ndk_root ANDROID_NDK_ROOT "$0"
if [ $? != 0 ]; then
echo "Please define ANDROID_NDK_ROOT to point to the root of your"
echo "Android NDK installation."
exit 1
fi
fi
echo "$ANDROID_NDK_ROOT" | grep -q -e " "
if [ $? = 0 ] ; then
echo "ERROR: The Android NDK installation path contains a space !"
echo "Please install to a different location."
exit 1
fi
if [ ! -d $ANDROID_NDK_ROOT ] ; then
echo "ERROR: Your ANDROID_NDK_ROOT variable does not point to a directory."
exit 1
fi
if [ ! -f $ANDROID_NDK_ROOT/build/core/ndk-common.sh ] ; then
echo "ERROR: Your ANDROID_NDK_ROOT variable does not point to a valid directory."
exit 1
fi
## Logging support
##
VERBOSE=${VERBOSE-yes}
VERBOSE2=${VERBOSE2-no}
# If NDK_LOGFILE is defined in the environment, use this as the log file
TMPLOG=
if [ -n "$NDK_LOGFILE" ] ; then
mkdir -p `dirname "$NDK_LOGFILE"` && touch "$NDK_LOGFILE"
TMPLOG="$NDK_LOGFILE"
fi
# Setup a log file where all log() and log2() output will be sent
#
# $1: log file path (optional)
#
setup_default_log_file ()
{
if [ -n "$NDK_LOGFILE" ] ; then
return
fi
if [ -n "$1" ] ; then
NDK_LOGFILE="$1"
else
NDK_LOGFILE=/tmp/ndk-log-$$.txt
fi
export NDK_LOGFILE
TMPLOG="$NDK_LOGFILE"
rm -rf "$TMPLOG" && mkdir -p `dirname "$TMPLOG"` && touch "$TMPLOG"
echo "To follow build in another terminal, please use: tail -F $TMPLOG"
}
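# Hypothetical usage: pick an explicit log file instead of the /tmp default.
#
#   setup_default_log_file /tmp/ndk-build.log
#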
dump ()
{
if [ -n "$TMPLOG" ] ; then
echo "$@" >> $TMPLOG
fi
echo "$@"
}
dump_n ()
{
if [ -n "$TMPLOG" ] ; then
printf %s "$@" >> $TMPLOG
fi
printf %s "$@"
}
log ()
{
if [ "$VERBOSE" = "yes" ] ; then
echo "$@"
else
if [ -n "$TMPLOG" ] ; then
echo "$@" >> $TMPLOG
fi
fi
}
log_n ()
{
if [ "$VERBOSE" = "yes" ] ; then
printf %s "$@"
else
if [ -n "$TMPLOG" ] ; then
printf %s "$@" >> $TMPLOG
fi
fi
}
log2 ()
{
if [ "$VERBOSE2" = "yes" ] ; then
echo "$@"
else
if [ -n "$TMPLOG" ] ; then
echo "$@" >> $TMPLOG
fi
fi
}
run ()
{
if [ "$VERBOSE" = "yes" ] ; then
echo "## COMMAND: $@"
"$@" 2>&1
else
if [ -n "$TMPLOG" ] ; then
echo "## COMMAND: $@" >> $TMPLOG
"$@" >>$TMPLOG 2>&1
else
"$@" > /dev/null 2>&1
fi
fi
}
run2 ()
{
if [ "$VERBOSE2" = "yes" ] ; then
echo "## COMMAND: $@"
"$@" 2>&1
elif [ "$VERBOSE" = "yes" ]; then
echo "## COMMAND: $@"
if [ -n "$TMPLOG" ]; then
echo "## COMMAND: $@" >> $TMPLOG
"$@" >>$TMPLOG 2>&1
else
"$@" > /dev/null 2>&1
fi
else
if [ -n "$TMPLOG" ]; then
"$@" >>$TMPLOG 2>&1
else
"$@" > /dev/null 2>&1
fi
fi
}
panic ()
{
dump "ERROR: $@"
exit 1
}
fail_panic ()
{
if [ $? != 0 ] ; then
dump "ERROR: $@"
exit 1
fi
}
## Utilities
##
# Return the value of a given named variable
# $1: variable name
#
# example:
# FOO=BAR
# BAR=ZOO
# echo `var_value $FOO`
# will print 'ZOO'
#
var_value ()
{
# find a better way to do that ?
eval echo "$`echo $1`"
}
# convert to uppercase
# assumes tr is installed on the platform ?
#
to_uppercase ()
{
echo $1 | tr "[:lower:]" "[:upper:]"
}
## First, we need to detect the HOST CPU, because proper HOST_ARCH detection
## requires platform-specific tricks.
##
HOST_EXE=""
HOST_OS=`uname -s`
case "$HOST_OS" in
Darwin)
HOST_OS=darwin
;;
Linux)
# note that building 32-bit binaries on x86_64 is handled later
HOST_OS=linux
;;
FreeBSD) # note: this is not tested
HOST_OS=freebsd
;;
CYGWIN*|*_NT-*)
HOST_OS=windows
HOST_EXE=.exe
if [ "x$OSTYPE" = xcygwin ] ; then
HOST_OS=cygwin
fi
;;
esac
log2 "HOST_OS=$HOST_OS"
log2 "HOST_EXE=$HOST_EXE"
## Now find the host architecture. This must correspond to the bitness of
## the binaries we're going to run with this NDK. Certain platforms allow
## you to use a 64-bit kernel with a 32-bit userland, and unfortunately
## commands like 'uname -m' only report the kernel bitness.
##
HOST_ARCH=`uname -m`
case "$HOST_ARCH" in
i?86) HOST_ARCH=x86
;;
amd64) HOST_ARCH=x86_64
;;
powerpc) HOST_ARCH=ppc
;;
esac
case "$HOST_OS-$HOST_ARCH" in
linux-x86_64|darwin-x86_64)
## On Linux or Darwin, a 64-bit kernel doesn't mean that the user-land
## is always 32-bit, so use "file" to determine the bitness of the shell
## that invoked us. The -L option is used to de-reference symlinks.
##
## Note that on Darwin, a single executable can contain both x86 and
## x86_64 machine code, so just look for x86_64 (darwin) or x86-64 (Linux)
## in the output.
##
file -L "$SHELL" | grep -q "x86[_-]64"
if [ $? != 0 ]; then
# $SHELL is not a 64-bit executable, so assume our userland is too.
log2 "Detected 32-bit userland on 64-bit kernel system!"
HOST_ARCH=x86
fi
;;
esac
log2 "HOST_ARCH=$HOST_ARCH"
# at this point, the supported values for HOST_ARCH are:
# x86
# x86_64
# ppc
#
# other values may be possible but haven't been tested
#
# at this point, the value of HOST_OS should be one of the following:
# linux
# darwin
# windows (MSys)
# cygwin
#
# Note that cygwin is treated as a special case because it behaves very differently
# for a few things. Other values may be possible but have not been tested
#
# define HOST_TAG as a unique tag used to identify both the host OS and CPU
# supported values are:
#
# linux-x86
# linux-x86_64
# darwin-x86
# darwin-x86_64
# darwin-ppc
# windows
# windows-x86_64
#
# other values are possible but were not tested.
#
compute_host_tag ()
{
HOST_TAG=${HOST_OS}-${HOST_ARCH}
# Special case for windows-x86 => windows
case $HOST_TAG in
windows-x86|cygwin-x86)
HOST_TAG="windows"
;;
esac
log2 "HOST_TAG=$HOST_TAG"
}
compute_host_tag
# Compute the number of host CPU cores as HOST_NUM_CPUS
#
case "$HOST_OS" in
linux)
HOST_NUM_CPUS=`cat /proc/cpuinfo | grep processor | wc -l`
;;
darwin|freebsd)
HOST_NUM_CPUS=`sysctl -n hw.ncpu`
;;
windows|cygwin)
HOST_NUM_CPUS=$NUMBER_OF_PROCESSORS
;;
*) # let's play safe here
HOST_NUM_CPUS=1
esac
log2 "HOST_NUM_CPUS=$HOST_NUM_CPUS"
# If BUILD_NUM_CPUS is not already defined in your environment,
# define it as twice HOST_NUM_CPUS. This is used to
# run Make commands in parallel, as in 'make -j$BUILD_NUM_CPUS'
#
if [ -z "$BUILD_NUM_CPUS" ] ; then
BUILD_NUM_CPUS=`expr $HOST_NUM_CPUS \* 2`
fi
log2 "BUILD_NUM_CPUS=$BUILD_NUM_CPUS"
## HOST TOOLCHAIN SUPPORT
##
# force the generation of 32-bit binaries on 64-bit systems
#
FORCE_32BIT=no
force_32bit_binaries ()
{
if [ "$HOST_ARCH" = x86_64 ] ; then
log2 "Forcing generation of 32-bit host binaries on $HOST_ARCH"
FORCE_32BIT=yes
HOST_ARCH=x86
log2 "HOST_ARCH=$HOST_ARCH"
compute_host_tag
fi
}
# On Windows, cygwin binaries will be generated by default, but
# you can force mingw ones that do not link to cygwin.dll if you
# call this function.
#
disable_cygwin ()
{
if [ $HOST_OS = cygwin ] ; then
log2 "Disabling cygwin binaries generation"
CFLAGS="$CFLAGS -mno-cygwin"
LDFLAGS="$LDFLAGS -mno-cygwin"
HOST_OS=windows
compute_host_tag
fi
}
# Various probes are going to need to run a small C program
mkdir -p /tmp/ndk-$USER/tmp/tests
TMPC=/tmp/ndk-$USER/tmp/tests/test-$$.c
TMPO=/tmp/ndk-$USER/tmp/tests/test-$$.o
TMPE=/tmp/ndk-$USER/tmp/tests/test-$$$EXE
TMPL=/tmp/ndk-$USER/tmp/tests/test-$$.log
# cleanup temporary files
clean_temp ()
{
rm -f $TMPC $TMPO $TMPL $TMPE
}
# cleanup temp files then exit with an error
clean_exit ()
{
clean_temp
exit 1
}
# this function will setup the compiler and linker and check that they work as advertised
# note that you should call 'force_32bit_binaries' before this one if you want it to
# generate 32-bit binaries on 64-bit systems (that support it).
#
setup_toolchain ()
{
if [ -z "$CC" ] ; then
CC=gcc
fi
if [ -z "$CXX" ] ; then
CXX=g++
fi
if [ -z "$CXXFLAGS" ] ; then
CXXFLAGS="$CFLAGS"
fi
if [ -z "$LD" ] ; then
LD="$CC"
fi
log2 "Using '$CC' as the C compiler"
# check that we can compile a trivial C program with this compiler
mkdir -p $(dirname "$TMPC")
cat > $TMPC <<EOF
int main(void) {}
EOF
if [ "$FORCE_32BIT" = yes ] ; then
CC="$CC -m32"
CXX="$CXX -m32"
LD="$LD -m32"
compile
if [ $? != 0 ] ; then
# sometimes, we need to also tell the assembler to generate 32-bit binaries
# this is highly dependent on your GCC installation (and no, we can't set
# this flag all the time)
CFLAGS="$CFLAGS -Wa,--32"
compile
fi
fi
compile
if [ $? != 0 ] ; then
echo "your C compiler doesn't seem to work:"
cat $TMPL
clean_exit
fi
log "CC : compiler check ok ($CC)"
# check that we can link the trivial program into an executable
link
if [ $? != 0 ] ; then
OLD_LD="$LD"
LD="$CC"
compile
link
if [ $? != 0 ] ; then
LD="$OLD_LD"
echo "your linker doesn't seem to work:"
cat $TMPL
clean_exit
fi
fi
log2 "Using '$LD' as the linker"
log "LD : linker check ok ($LD)"
# check the C++ compiler
log2 "Using '$CXX' as the C++ compiler"
cat > $TMPC <<EOF
#include <iostream>
using namespace std;
int main()
{
cout << "Hello World!" << endl;
return 0;
}
EOF
compile_cpp
if [ $? != 0 ] ; then
echo "your C++ compiler doesn't seem to work"
cat $TMPL
clean_exit
fi
log "CXX : C++ compiler check ok ($CXX)"
# XXX: TODO perform AR checks
AR=ar
ARFLAGS=
}
# try to compile the current source file in $TMPC into an object
# stores the error log into $TMPL
#
compile ()
{
log2 "Object : $CC -o $TMPO -c $CFLAGS $TMPC"
$CC -o $TMPO -c $CFLAGS $TMPC 2> $TMPL
}
compile_cpp ()
{
log2 "Object : $CXX -o $TMPO -c $CXXFLAGS $TMPC"
$CXX -o $TMPO -c $CXXFLAGS $TMPC 2> $TMPL
}
# try to link the recently built file into an executable. error log in $TMPL
#
link()
{
log2 "Link : $LD -o $TMPE $TMPO $LDFLAGS"
$LD -o $TMPE $TMPO $LDFLAGS 2> $TMPL
}
# run a command
#
execute()
{
log2 "Running: $*"
$*
}
# perform a simple compile / link / run of the source file in $TMPC
compile_exec_run()
{
log2 "RunExec : $CC -o $TMPE $CFLAGS $TMPC"
compile
if [ $? != 0 ] ; then
echo "Failure to compile test program"
cat $TMPC
cat $TMPL
clean_exit
fi
link
if [ $? != 0 ] ; then
echo "Failure to link test program"
cat $TMPC
echo "------"
cat $TMPL
clean_exit
fi
$TMPE
}
pattern_match ()
{
echo "$2" | grep -q -E -e "$1"
}
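# Illustrative usage (added example, not part of the original script):
#   pattern_match "^(http|https|ftp):.*" "http://example.com"   # returns 0 (match)
#   pattern_match "^(http|https|ftp):.*" "/some/local/path"     # returns 1 (no match)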
# Let's check that we have a working md5sum here
check_md5sum ()
{
A_MD5=`echo "A" | md5sum | cut -d' ' -f1`
if [ "$A_MD5" != "bf072e9119077b4e76437a93986787ef" ] ; then
echo "Please install md5sum on this machine"
exit 2
fi
}
# Find if a given shell program is available.
# We need to take care of the fact that the 'which <foo>' command
# may return either an empty string (Linux) or something like
# "no <foo> in ..." (Darwin). Also, we need to redirect stderr
# to /dev/null for Cygwin
#
# $1: variable name
# $2: program name
#
# Result: set $1 to the full path of the corresponding command
# or to the empty/undefined string if not available
#
find_program ()
{
local PROG RET
PROG=`which $2 2>/dev/null`
RET=$?
if [ $RET != 0 ]; then
PROG=
fi
eval $1=\"$PROG\"
return $RET
}
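# Illustrative usage (added example; 'awk' is just a stand-in program name):
#   find_program CMD_AWK awk
#   if [ -n "$CMD_AWK" ]; then
#       log2 "Found awk at: $CMD_AWK"
#   fi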
prepare_download ()
{
find_program CMD_WGET wget
find_program CMD_CURL curl
    find_program CMD_SCP scp
}
# Download a file with either 'curl', 'wget' or 'scp'
#
# $1: source URL (e.g. http://foo.com, ssh://blah, /some/path)
# $2: target file
download_file ()
{
# Is this HTTP, HTTPS or FTP ?
if pattern_match "^(http|https|ftp):.*" "$1"; then
if [ -n "$CMD_WGET" ] ; then
run $CMD_WGET -O $2 $1
elif [ -n "$CMD_CURL" ] ; then
run $CMD_CURL -o $2 $1
else
echo "Please install wget or curl on this machine"
exit 1
fi
return
fi
# Is this SSH ?
# Accept both ssh://<path> or <machine>:<path>
#
if pattern_match "^(ssh|[^:]+):.*" "$1"; then
if [ -n "$CMD_SCP" ] ; then
scp_src=`echo $1 | sed -e s%ssh://%%g`
run $CMD_SCP $scp_src $2
else
echo "Please install scp on this machine"
exit 1
fi
return
fi
# Is this a file copy ?
# Accept both file://<path> or /<path>
#
if pattern_match "^(file://|/).*" "$1"; then
cp_src=`echo $1 | sed -e s%^file://%%g`
run cp -f $cp_src $2
return
fi
}
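# Illustrative usage (added example; URLs and file names are placeholders):
#   prepare_download
#   download_file http://example.com/archive.tar.gz /tmp/archive.tar.gz
#   download_file build-host:/srv/prebuilts.tar.bz2 /tmp/prebuilts.tar.bz2   # fetched via scp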
# Unpack a given archive
#
# $1: archive file path
# $2: optional target directory (current one if omitted)
#
unpack_archive ()
{
local ARCHIVE="$1"
local DIR=${2-.}
local RESULT TARFLAGS ZIPFLAGS
mkdir -p "$DIR"
if [ "$VERBOSE2" = "yes" ] ; then
TARFLAGS="vxpf"
ZIPFLAGS=""
else
TARFLAGS="xpf"
ZIPFLAGS="q"
fi
case "$ARCHIVE" in
*.zip)
(cd $DIR && run unzip $ZIPFLAGS "$ARCHIVE")
;;
*.tar)
run tar $TARFLAGS "$ARCHIVE" -C $DIR
;;
*.tar.gz)
run tar z$TARFLAGS "$ARCHIVE" -C $DIR
;;
*.tar.bz2)
run tar j$TARFLAGS "$ARCHIVE" -C $DIR
;;
*)
panic "Cannot unpack archive with unknown extension: $ARCHIVE"
;;
esac
}
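# Illustrative usage (added example; archive paths are placeholders):
#   unpack_archive /tmp/toolchain.tar.bz2 /tmp/toolchain   # extract into /tmp/toolchain
#   unpack_archive /tmp/sources.zip                        # extract into the current directory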
# Pack a given archive
#
# $1: archive file path (including extension)
# $2: source directory for archive content
# $3+: list of files (including patterns), all if empty
pack_archive ()
{
local ARCHIVE="$1"
local SRCDIR="$2"
local SRCFILES
local TARFLAGS ZIPFLAGS
shift; shift;
if [ -z "$1" ] ; then
SRCFILES="*"
else
SRCFILES="$@"
fi
if [ "`basename $ARCHIVE`" = "$ARCHIVE" ] ; then
ARCHIVE="`pwd`/$ARCHIVE"
fi
mkdir -p `dirname $ARCHIVE`
if [ "$VERBOSE2" = "yes" ] ; then
TARFLAGS="vcf"
ZIPFLAGS="-9r"
else
TARFLAGS="cf"
ZIPFLAGS="-9qr"
fi
case "$ARCHIVE" in
*.zip)
(cd $SRCDIR && run zip $ZIPFLAGS "$ARCHIVE" $SRCFILES)
;;
*.tar)
(cd $SRCDIR && run tar $TARFLAGS "$ARCHIVE" $SRCFILES)
;;
*.tar.gz)
(cd $SRCDIR && run tar z$TARFLAGS "$ARCHIVE" $SRCFILES)
;;
*.tar.bz2)
(cd $SRCDIR && run tar j$TARFLAGS "$ARCHIVE" $SRCFILES)
;;
*)
panic "Unsupported archive format: $ARCHIVE"
;;
esac
}
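# Illustrative usage (added example; paths are placeholders):
#   pack_archive /tmp/release/toolchain.tar.bz2 /tmp/build toolchain/   # pack one subdirectory
#   pack_archive /tmp/release/full.zip /tmp/build                       # pack everything in /tmp/build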
# Copy a directory, create target location if needed
#
# $1: source directory
# $2: target directory location
#
copy_directory ()
{
local SRCDIR="$1"
local DSTDIR="$2"
if [ ! -d "$SRCDIR" ] ; then
panic "Can't copy from non-directory: $SRCDIR"
fi
log "Copying directory: "
log " from $SRCDIR"
log " to $DSTDIR"
mkdir -p "$DSTDIR" && (cd "$SRCDIR" && 2>/dev/null tar cf - *) | (tar xf - -C "$DSTDIR")
fail_panic "Cannot copy to directory: $DSTDIR"
}
# This is the same as copy_directory(), but symlinks will be replaced
# by the files they actually point to instead.
copy_directory_nolinks ()
{
local SRCDIR="$1"
local DSTDIR="$2"
if [ ! -d "$SRCDIR" ] ; then
panic "Can't copy from non-directory: $SRCDIR"
fi
log "Copying directory (without symlinks): "
log " from $SRCDIR"
log " to $DSTDIR"
mkdir -p "$DSTDIR" && (cd "$SRCDIR" && tar chf - *) | (tar xf - -C "$DSTDIR")
fail_panic "Cannot copy to directory: $DSTDIR"
}
# Copy certain files from one directory to another one
# $1: source directory
# $2: target directory
# $3+: file list (including patterns)
copy_file_list ()
{
local SRCDIR="$1"
local DSTDIR="$2"
shift; shift;
if [ ! -d "$SRCDIR" ] ; then
panic "Cant' copy from non-directory: $SRCDIR"
fi
log "Copying file: $@"
log " from $SRCDIR"
log " to $DSTDIR"
mkdir -p "$DSTDIR" && (cd "$SRCDIR" && tar cf - "$@") | (tar xf - -C "$DSTDIR")
fail_panic "Cannot copy files to directory: $DSTDIR"
}
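# Illustrative usage (added example; paths and patterns are placeholders):
#   copy_file_list /tmp/build /tmp/release "*.so" "docs/README"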
# Rotate a log file
# If the given log file exists, rename it by appending -1 to its name.
# If older log files exist, rename them to -<n+1>
# $1: log file
# $2: maximum version to retain [optional]
rotate_log ()
{
# Default Maximum versions to retain
local MAXVER="5"
local LOGFILE="$1"
shift;
if [ ! -z "$1" ] ; then
local tmpmax="$1"
shift;
tmpmax=`expr $tmpmax + 0`
if [ $tmpmax -lt 1 ] ; then
panic "Invalid maximum log file versions '$tmpmax' invalid; defaulting to $MAXVER"
else
MAXVER=$tmpmax;
fi
fi
# Do Nothing if the log file does not exist
if [ ! -f "${LOGFILE}" ] ; then
return
fi
# Rename existing older versions
ver=$MAXVER
while [ $ver -ge 1 ]
do
local prev=$(( $ver - 1 ))
local old="-$prev"
# Instead of old version 0; use the original filename
if [ $ver -eq 1 ] ; then
old=""
fi
if [ -f "${LOGFILE}${old}" ] ; then
mv -f "${LOGFILE}${old}" "${LOGFILE}-${ver}"
fi
ver=$prev
done
}
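# Illustrative usage (added example; keeps at most 3 rotated copies):
#   rotate_log /tmp/build.log 3
#   # build.log-2 -> build.log-3, build.log-1 -> build.log-2, build.log -> build.log-1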
|
rex-xxx/mt6572_x201
|
ndk/build/core/ndk-common.sh
|
Shell
|
gpl-2.0
| 19,715 |
# load with: . ipython-completion.bash
if [[ -n ${ZSH_VERSION-} ]]; then
autoload -Uz bashcompinit && bashcompinit
fi
_ipython_get_flags()
{
local url=$1
local var=$2
local dash=$3
if [[ "$url $var" == $__ipython_complete_last ]]; then
opts=$__ipython_complete_last_res
return
fi
    # matplotlib and profile don't need the '=' and the
    # version without it simplifies the special-cased completion
opts=$(ipython ${url} --help-all | grep -E "^-{1,2}[^-]" | sed -e "s/<.*//" -e "s/[^=]$/& /" -e "s/^--matplotlib=$//" -e "s/^--profile=$/--profile /" -e "$ s/^/\n-h\n--help\n--help-all\n/")
__ipython_complete_last="$url $var"
__ipython_complete_last_res="$opts"
}
_ipython()
{
local cur=${COMP_WORDS[COMP_CWORD]}
local prev=${COMP_WORDS[COMP_CWORD - 1]}
local subcommands="kernel profile locate history"
local opts="help"
if [ -z "$__ipython_complete_baseopts" ]; then
_ipython_get_flags baseopts
__ipython_complete_baseopts="${opts}"
fi
local baseopts="$__ipython_complete_baseopts"
local mode=""
for i in "${COMP_WORDS[@]}"; do
[ "$cur" = "$i" ] && break
if [[ ${subcommands} == *${i}* ]]; then
mode="$i"
break
elif [[ ${i} == "--"* ]]; then
mode="nosubcommand"
break
fi
done
if [[ ${cur} == -* ]]; then
case $mode in
"kernel")
_ipython_get_flags $mode
opts=$"${opts} ${baseopts}"
;;
"locate" | "profile")
_ipython_get_flags $mode
;;
"history")
if [[ $COMP_CWORD -ge 3 ]]; then
# 'history trim' and 'history clear' covered by next line
_ipython_get_flags $mode\ "${COMP_WORDS[2]}"
else
_ipython_get_flags $mode
fi
opts=$"${opts}"
;;
*)
opts=$baseopts
esac
# don't drop the trailing space
local IFS=$'\t\n'
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
elif [[ $mode == "profile" ]]; then
opts="list create locate "
local IFS=$'\t\n'
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ $mode == "history" ]]; then
if [[ $COMP_CWORD -ge 3 ]]; then
# drop into flags
opts="--"
else
opts="trim clear "
fi
local IFS=$'\t\n'
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ $mode == "locate" ]]; then
if [[ $COMP_CWORD -ge 3 ]]; then
# drop into flags
opts="--"
else
opts="profile "
fi
local IFS=$'\t\n'
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
elif [[ ${prev} == "--matplotlib"* ]] || [[ ${prev} == "--gui"* ]]; then
if [ -z "$__ipython_complete_matplotlib" ]; then
__ipython_complete_matplotlib=`cat <<EOF | python -
try:
import IPython.core.shellapp as mod;
for k in mod.InteractiveShellApp.matplotlib.values:
print "%s " % k
except:
pass
EOF
`
fi
local IFS=$'\t\n'
COMPREPLY=( $(compgen -W "${__ipython_complete_matplotlib}" -- ${cur}) )
elif [[ ${prev} == "--profile"* ]]; then
if [ -z "$__ipython_complete_profiles" ]; then
__ipython_complete_profiles=`cat <<EOF | python -
try:
import IPython.core.profileapp
for k in IPython.core.profileapp.list_bundled_profiles():
print "%s " % k
p = IPython.core.profileapp.ProfileList()
for k in IPython.core.profileapp.list_profiles_in(p.ipython_dir):
print "%s " % k
except:
pass
EOF
`
fi
local IFS=$'\t\n'
COMPREPLY=( $(compgen -W "${__ipython_complete_profiles}" -- ${cur}) )
else
if [ "$COMP_CWORD" == 1 ]; then
local IFS=$'\t\n'
local sub=$(echo $subcommands | sed -e "s/ / \t/g")
COMPREPLY=( $(compgen -W "${sub}" -- ${cur}) )
else
COMPREPLY=( $(compgen -f -- ${cur}) )
fi
fi
}
complete -o default -o nospace -F _ipython ipython
|
pacoqueen/ginn
|
extra/install/ipython2/ipython-5.10.0/examples/IPython Kernel/ipython-completion.bash
|
Shell
|
gpl-2.0
| 4,271 |
#!/bin/sh
#2007 Lesser GPL licence v2 (http://www.fsf.org/licensing/licenses/lgpl.html)
#make the pup_save.2fs file bigger.
#v412 /etc/DISTRO_SPECS, renamed pup_xxx.sfs, pup_save.2fs etc.
#v555 pup files renamed to woofr555.sfs, woofsave.2fs.
#100913 simplified filenames, minor update of comments.
#120202 rodin.s: internationalized.
#120323 partial replace 'xmessage' with 'pupmessage'.
#130715 some translation fixes.
#131223 gtkdialog
#131226 rodin.s: updating i18n
export TEXTDOMAIN=resizepfile.sh
export TEXTDOMAINDIR=/usr/share/locale
export OUTPUT_CHARSET=UTF-8
. gettext.sh
#variables created at bootup by /initrd/usr/sbin/init...
. /etc/rc.d/PUPSTATE
. /etc/DISTRO_SPECS #v412
SAVELOC=$(echo $PUPSAVE | cut -f3 -d ',')
[ -d /mnt/home$SAVELOC ] && /usr/lib/gtkdialog/box_ok "$(gettext 'Resize personal storage file')" info "<b>$(gettext "Puppy is currently using a savefolder. There is no need to resize it")</b>" " " && exit 0
#find out what modes use a ${DISTRO_FILE_PREFIX}save.2fs file...
CANDOIT="no"
case $PUPMODE in
"12") #${DISTRO_FILE_PREFIX}save.3fs (pup_rw), nothing on pup_ro1, ${DISTRO_PUPPYSFS} (pup_ro2).
PERSISTMNTPT="/initrd/pup_rw"
CANDOIT="yes"
;;
"13") #tmpfs (pup_rw), ${DISTRO_FILE_PREFIX}save.3fs (pup_ro1), ${DISTRO_PUPPYSFS} (pup_ro2).
PERSISTMNTPT="/initrd/pup_ro1"
CANDOIT="yes"
;;
esac
if [ "$CANDOIT" != "yes" ];then
/usr/lib/gtkdialog/box_ok "$(gettext 'Resize personal storage file')" error "<b>$(gettext "Sorry, Puppy is not currently using a personal persistent storage file.")</b>" " " "$(eval_gettext "If this is the first time that you booted Puppy, say from a live-CD, you are currently running totally in RAM and you will be asked to create a personal storage file when you end the session (shutdown the PC or reboot). Note, the file will be named \${DISTRO_FILE_PREFIX}save.2fs and will be created in a place that you nominate.")
$(eval_gettext "If you have installed Puppy to hard drive, or installed such that personal storage is an entire partition, then you will not have a \${DISTRO_FILE_PREFIX}save.2fs file either.")"
exit
fi
[ ! "$PUPSAVE" ] && exit #precaution
[ ! "$PUP_HOME" ] && exit #precaution.
SAVEFS="`echo -n "$PUPSAVE" | cut -f 2 -d ','`"
SAVEPART="`echo -n "$PUPSAVE" | cut -f 1 -d ','`"
SAVEFILE="`echo -n "$PUPSAVE" | cut -f 3 -d ','`"
NAMEPFILE="`basename $SAVEFILE`"
HOMELOCATION="/initrd${PUP_HOME}${SAVEFILE}"
SIZEFREE=`df -m | grep "$PERSISTMNTPT" | tr -s " " | cut -f 4 -d " "` #free space in ${DISTRO_FILE_PREFIX}save.3fs
ACTUALSIZK=`stat -c %s $HOMELOCATION` #total size of ${DISTRO_FILE_PREFIX}save.3fs
ACTUALSIZE=`expr $ACTUALSIZK \/ 1024 \/ 1024`
APATTERN="/dev/${SAVEPART} "
PARTSIZE=`df -m | grep "$APATTERN" | tr -s " " | cut -f 2 -d " "`
PARTFREE=`df -m | grep "$APATTERN" | tr -s " " | cut -f 4 -d " "`
. /usr/lib/gtkdialog/svg_bar 200 "$(((($ACTUALSIZE-$SIZEFREE)*200/$ACTUALSIZE)))" "$ACTUALSIZE Mb / $SIZEFREE Mb $(gettext 'free')" > /tmp/resizepfile_pfile.svg
. /usr/lib/gtkdialog/svg_bar 200 "$(((($PARTSIZE-$PARTFREE)*200/$PARTSIZE)))" "$PARTSIZE Mb / $PARTFREE Mb $(gettext 'free')" > /tmp/resizepfile_partition.svg
x='
<window title="'$(gettext 'Resize Personal Storage File')'" icon-name="gtk-refresh">
<vbox space-expand="true" space-fill="true">
'"$(/usr/lib/gtkdialog/xml_info fixed puppy_increase.svg 60 "$(eval_gettext "<b>Your personal file is \${NAMEPFILE},</b> and this contains user data, configuration files, email, newsgroup cache, history files and installed packages...")" "$(eval_gettext "If you see that you are running low on space in \$NAMEPFILE, you can make it bigger, but of course there must be enough space in \$SAVEPART.")")"'
<vbox space-expand="true" space-fill="true">
<frame>
<text height-request="5"><label>""</label></text>
<vbox space-expand="true" space-fill="true">
<vbox space-expand="false" space-fill="false">
<hbox>
<text xalign="0" use-markup="true"><label>"<b>'$(gettext 'Personal File')'</b>: '$NAMEPFILE'"</label></text>
<text space-expand="true" space-fill="true"><label>""</label></text>
<pixmap><input file>/tmp/resizepfile_pfile.svg</input></pixmap>
</hbox>
<hbox>
<text xalign="0" use-markup="true"><label>"<b>'$(gettext 'Partition')'</b>: '$SAVEPART'"</label></text>
<text space-expand="true" space-fill="true"><label>""</label></text>
<pixmap><input file>/tmp/resizepfile_partition.svg</input></pixmap>
</hbox>
</vbox>
<text height-request="5" space-expand="true" space-fill="true"><label>""</label></text>
<vbox space-expand="false" space-fill="false">
<hbox space-expand="true" space-fill="true">
<text xalign="0" space-expand="true" space-fill="true"><label>'$(eval_gettext "Increase size of \$NAMEPFILE by amount (Mb). You cannot make it smaller.")'</label></text>
<comboboxtext width-request="100" space-expand="false" space-fill="false">
<variable>KILOBIG</variable>
<item>32</item>
<item>64</item>
<item>128</item>
<item>256</item>
<item>512</item>
<item>1024</item>
<item>2048</item>
<item>4096</item>
</comboboxtext>
</hbox>
</vbox>
<text height-request="10"><label>""</label></text>
</vbox>
</frame>
</vbox>
<hbox space-expand="false" space-fill="false">
'"`/usr/lib/gtkdialog/xml_pixmap nb`"'
<text xalign="0" use-markup="true" space-expand="true" space-fill="true"><label>"<b>'$(gettext 'Resizing requires a system reboot')'</b>"</label></text>
<button space-expand="false" space-fill="false">
<label>'$(gettext "Cancel")'</label>
'"`/usr/lib/gtkdialog/xml_button-icon cancel`"'
<action type="exit">EXIT_NOW</action>
</button>
<button space-expand="false" space-fill="false">
<label>'$(gettext "Ok")'</label>
'"`/usr/lib/gtkdialog/xml_button-icon ok`"'
<action type="exit">save</action>
</button>
</hbox>
</vbox>
</window>'
export resize="$x"
. /usr/lib/gtkdialog/xml_info gtk > /dev/null #build bg_pixmap for gtk-theme
eval $(gtkdialog -p resize)
case ${EXIT} in
save)KILOBIG=$(($KILOBIG * 1024))
echo "KILOBIG=$KILOBIG" > /initrd${PUP_HOME}/pupsaveresizenew.txt
echo "PUPSAVEFILEX=$SAVEFILE" >> /initrd${PUP_HOME}/pupsaveresizenew.txt #131231
;;
*)
exit
;;
esac
/usr/lib/gtkdialog/box_ok "$(gettext 'Resize personal storage file')" complete "$(eval_gettext "Okay, you have chosen to <b>increase \${NAMEPFILE} by \${KILOBIG} Kbytes</b>, however as the file is currently in use, it will happen at reboot.")" " " "$(gettext 'Technical notes:')" "$(eval_gettext "The required size increase has been written to file pupsaveresizenew.txt, in partition \${SAVEPART} (currently mounted on /mnt/home).")" "$(gettext 'File pupsaveresizenew.txt will be read at bootup and the resize performed then pupsaveresizenew.txt will be deleted.')" "$(eval_gettext "WARNING: If you have multiple \${DISTRO_FILE_PREFIX}save files, be sure to select the same one when you reboot.")" " " "<b>$(gettext 'You can keep using Puppy. The change will only happen at reboot.')</b>"
###END###
#notes:
# dd if=/dev/zero bs=1k count=$KILOBIG | tee -a $HOMELOCATION > /dev/null
|
dimkr/woof-CE-libre
|
woof-code/rootfs-skeleton/usr/sbin/resizepfile.sh
|
Shell
|
gpl-2.0
| 7,396 |
#!/bin/sh
PATH=/bin:/sbin
SQUASH_IMG=/squash/root.img
SQUASH_MNT=/squash/root
SQUASH_MNT_REC=/squash/mounts
echo $SQUASH_MNT > $SQUASH_MNT_REC
# The following mount points are necessary for mounting a squash image
[ ! -d /proc/self ] && \
mount -t proc -o nosuid,noexec,nodev proc /proc
[ ! -d /sys/kernel ] && \
mount -t sysfs -o nosuid,noexec,nodev sysfs /sys
[ ! -e /dev/loop-control ] && \
mount -t devtmpfs -o mode=0755,noexec,nosuid,strictatime devtmpfs /dev
# Need a loop device backend, overlayfs, and squashfs module
modprobe loop
if [ $? != 0 ]; then
echo "Unable to setup loop module"
fi
modprobe squashfs
if [ $? != 0 ]; then
echo "Unable to setup squashfs module"
fi
modprobe overlay
if [ $? != 0 ]; then
echo "Unable to setup overlay module"
fi
[ ! -d "$SQUASH_MNT" ] && \
mkdir -m 0755 -p $SQUASH_MNT
# Mount the squashfs image
mount -t squashfs -o ro,loop $SQUASH_IMG $SQUASH_MNT
if [ $? != 0 ]; then
echo "Unable to mount squashed initramfs image"
fi
for file in $SQUASH_MNT/*; do
file=${file#$SQUASH_MNT/}
lowerdir=$SQUASH_MNT/$file
workdir=/squash/overlay-work/$file
upperdir=/$file
mntdir=/$file
mkdir -m 0755 -p $workdir
mkdir -m 0755 -p $mntdir
mount -t overlay overlay -o\
lowerdir=$lowerdir,upperdir=$upperdir,workdir=$workdir $mntdir
echo $mntdir >> $SQUASH_MNT_REC
done
|
yuwata/dracut
|
modules.d/99squash/setup-squash.sh
|
Shell
|
gpl-2.0
| 1,349 |
#!/bin/bash
# get paths and jvm settings:
source ./env.sh
# get marlin settings and boot class path:
source ./env_marlin.sh
# Enable stats
CHECK=true
STATS=false
JAVA_OPTS="-DPNGImageWriter.level=4 -Dsun.java2d.renderer.doChecks=$CHECK -Dsun.java2d.renderer.doStats=$STATS $JAVA_OPTS"
echo "CLASSPATH: $CLASSPATH"
echo "Boot CP: $BOOTCLASSPATH"
echo "Java opts: $JAVA_OPTS"
echo "Java tuning: $JAVA_TUNING"
echo "JVM path"
which java
echo "Java version"
java -version
# longer (shared 4T) test:
PROFILE=longer_shared.properties
java -Dmapbench.profile=$PROFILE $BOOTCLASSPATH $JAVA_OPTS $JAVA_TUNING -cp $CLASSPATH it.geosolutions.java2d.MapDisplay
# scaling test (1T but image x4):
PROFILE=scaleTest.properties
java -Dmapbench.profile=$PROFILE $BOOTCLASSPATH $JAVA_OPTS $JAVA_TUNING -cp $CLASSPATH it.geosolutions.java2d.MapDisplay
# Use dashed stroke
PROFILE=longer_shared_dashed.properties
java -Dmapbench.profile=$PROFILE $BOOTCLASSPATH $JAVA_OPTS $JAVA_TUNING -cp $CLASSPATH it.geosolutions.java2d.MapDisplay
# complex affine transform test:
PROFILE=cpxTransform.properties
java -Dmapbench.profile=$PROFILE $BOOTCLASSPATH $JAVA_OPTS $JAVA_TUNING -cp $CLASSPATH it.geosolutions.java2d.MapDisplay
# even_odd winding rule test:
PROFILE=evenOddWindingRule.properties
java -Dmapbench.profile=$PROFILE $BOOTCLASSPATH $JAVA_OPTS $JAVA_TUNING -cp $CLASSPATH it.geosolutions.java2d.MapDisplay
# use createStrokedShape() + fill() test:
PROFILE=strokedShape.properties
java -Dmapbench.profile=$PROFILE $BOOTCLASSPATH $JAVA_OPTS $JAVA_TUNING -cp $CLASSPATH it.geosolutions.java2d.MapDisplay
|
bourgesl/mapbench
|
bin/reg_marlin.sh
|
Shell
|
gpl-2.0
| 1,610 |
#!/bin/sh
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# ex: ts=8 sw=4 sts=4 et filetype=sh
# returns OK if $1 contains $2
strstr() {
[ "${1#*$2*}" != "$1" ]
}
# returns OK if $1 contains $2 at the beginning
str_starts() {
[ "${1#$2*}" != "$1" ]
}
# replaces all occurrences of 'search' in 'str' with 'replacement'
#
# str_replace str search replacement
#
# example:
# str_replace ' one two three ' ' ' '_'
str_replace() {
local in="$1"; local s="$2"; local r="$3"
local out=''
while strstr "${in}" "$s"; do
chop="${in%%$s*}"
out="${out}${chop}$r"
in="${in#*$s}"
done
echo "${out}${in}"
}
_getcmdline() {
local _line
local _i
unset _line
if [ -z "$CMDLINE" ]; then
if [ -e /etc/cmdline ]; then
while read -r _line; do
CMDLINE_ETC="$CMDLINE_ETC $_line";
done </etc/cmdline;
fi
for _i in /etc/cmdline.d/*.conf; do
[ -e "$_i" ] || continue
while read -r _line; do
CMDLINE_ETC_D="$CMDLINE_ETC_D $_line";
done <"$_i";
done
read -r CMDLINE </proc/cmdline;
CMDLINE="$CMDLINE_ETC_D $CMDLINE_ETC $CMDLINE"
fi
}
_dogetarg() {
local _o _val _doecho
unset _val
unset _o
unset _doecho
_getcmdline
for _o in $CMDLINE; do
if [ "${_o%%=*}" = "${1%%=*}" ]; then
if [ -n "${1#*=}" -a "${1#*=*}" != "${1}" ]; then
# if $1 has a "=<value>", we want the exact match
if [ "$_o" = "$1" ]; then
_val="1";
unset _doecho
fi
continue
fi
if [ "${_o#*=}" = "$_o" ]; then
# if cmdline argument has no "=<value>", we assume "=1"
_val="1";
unset _doecho
continue
fi
_val=${_o#*=};
_doecho=1
fi
done
if [ -n "$_val" ]; then
[ "x$_doecho" != "x" ] && echo "$_val";
return 0;
fi
return 1;
}
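# Added note (not in the original): getarg looks up a kernel command line
# option and prints its value; it returns 0 if the option is present.
# Illustrative calls:
#   getarg root=           # prints the value given after root=
#   getarg -y rdinitdebug  # prints "1" and returns 0 if the flag is present
#   getarg -n rd.info      # prints "0" and returns 1 if the flag is present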
getarg() {
set +x
while [ $# -gt 0 ]; do
case $1 in
-y) if _dogetarg $2 >/dev/null; then
echo 1
[ "$RD_DEBUG" = "yes" ] && set -x
return 0
fi
shift 2;;
-n) if _dogetarg $2 >/dev/null; then
echo 0;
[ "$RD_DEBUG" = "yes" ] && set -x
return 1
fi
shift 2;;
*) if _dogetarg $1; then
[ "$RD_DEBUG" = "yes" ] && set -x
return 0;
fi
shift;;
esac
done
[ "$RD_DEBUG" = "yes" ] && set -x
return 1
}
getargbool() {
local _b
unset _b
local _default
_default=$1; shift
_b=$(getarg "$@")
[ $? -ne 0 -a -z "$_b" ] && _b=$_default
if [ -n "$_b" ]; then
[ $_b = "0" ] && return 1
[ $_b = "no" ] && return 1
fi
return 0
}
_dogetargs() {
set +x
local _o _found _key
unset _o
unset _found
_getcmdline
_key=$1
set --
for _o in $CMDLINE; do
if [ "$_o" = "$_key" ]; then
_found=1;
elif [ "${_o%%=*}" = "${_key%=}" ]; then
[ -n "${_o%%=*}" ] && set -- "$@" "${_o#*=}";
_found=1;
fi
done
if [ -n "$_found" ]; then
[ $# -gt 0 ] && echo -n "$@"
return 0
fi
return 1;
}
getargs() {
set +x
local _val _i _args _gfound
unset _val
unset _gfound
_args="$@"
set --
for _i in $_args; do
_val="$(_dogetargs $_i)"
[ $? -eq 0 ] && _gfound=1
[ -n "$_val" ] && set -- "$@" "$_val"
done
if [ -n "$_gfound" ]; then
if [ $# -gt 0 ]; then
echo -n "$@"
else
echo -n 1
fi
[ "$RD_DEBUG" = "yes" ] && set -x
return 0
fi
[ "$RD_DEBUG" = "yes" ] && set -x
return 1;
}
# Prints value of given option. If option is a flag and it's present,
# it just returns 0. Otherwise 1 is returned.
# $1 = options separated by commas
# $2 = option we are interested in
#
# Example:
# $1 = cipher=aes-cbc-essiv:sha256,hash=sha256,verify
# $2 = hash
# Output:
# sha256
getoptcomma() {
local line=",$1,"; local opt="$2"; local tmp
case "${line}" in
*,${opt}=*,*)
tmp="${line#*,${opt}=}"
echo "${tmp%%,*}"
return 0
;;
*,${opt},*) return 0;;
esac
return 1
}
# Splits given string 'str' with separator 'sep' into variables 'var1', 'var2',
# 'varN'. If the number of fields is less than the number of variables, the
# remaining variables are not set. If the number of fields is greater than the
# number of variables, the last variable takes the remaining fields. In short,
# it acts similarly to 'read'.
#
# splitsep sep str var1 var2 varN
#
# example:
# splitsep ':' 'foo:bar:baz' v1 v2
# in result:
# v1='foo', v2='bar:baz'
#
# TODO: ':' inside fields.
splitsep() {
local sep="$1"; local str="$2"; shift 2
local tmp
while [ -n "$str" -a -n "$*" ]; do
tmp="${str%%$sep*}"
eval "$1=${tmp}"
str="${str#$tmp}"
str="${str#$sep}"
shift
done
return 0
}
setdebug() {
if [ -z "$RD_DEBUG" ]; then
if [ -e /proc/cmdline ]; then
RD_DEBUG=no
if getargbool 0 rd.debug -y rdinitdebug -y rdnetdebug; then
RD_DEBUG=yes
[ -n "$BASH" ] && \
export PS4='${BASH_SOURCE}@${LINENO}(${FUNCNAME[0]}): ';
fi
fi
export RD_DEBUG
fi
[ "$RD_DEBUG" = "yes" ] && set -x
}
setdebug
source_all() {
local f
[ "$1" ] && [ -d "/$1" ] || return
for f in "/$1"/*.sh; do [ -e "$f" ] && . "$f"; done
}
hookdir=/lib/dracut/hooks
export hookdir
source_hook() {
source_all "/lib/dracut/hooks/$1"
}
check_finished() {
local f
for f in $hookdir/initqueue/finished/*.sh; do
[ "$f" = "$hookdir/initqueue/finished/*.sh" ] && return 0
{ [ -e "$f" ] && ( . "$f" ) ; } || return 1
done
return 0
}
source_conf() {
local f
[ "$1" ] && [ -d "/$1" ] || return
for f in "/$1"/*.conf; do [ -e "$f" ] && . "$f"; done
}
die() {
{
echo "<24>dracut: FATAL: $@";
echo "<24>dracut: Refusing to continue";
} > /dev/kmsg
{
echo "warn dracut: FATAL: \"$@\"";
echo "warn dracut: Refusing to continue";
} >> $hookdir/emergency/01-die.sh
> /run/initramfs/.die
exit 1
}
check_quiet() {
if [ -z "$DRACUT_QUIET" ]; then
DRACUT_QUIET="yes"
getargbool 0 rd.info -y rdinfo && DRACUT_QUIET="no"
getargbool 0 rd.debug -y rdinitdebug && DRACUT_QUIET="no"
getarg quiet || DRACUT_QUIET="yes"
fi
}
warn() {
check_quiet
echo "<28>dracut Warning: $@" > /dev/kmsg
echo "dracut Warning: $@" >&2
}
info() {
check_quiet
echo "<30>dracut: $@" > /dev/kmsg
[ "$DRACUT_QUIET" != "yes" ] && \
echo "dracut: $@"
}
vwarn() {
while read line; do
warn $line;
done
}
vinfo() {
while read line; do
info $line;
done
}
check_occurances() {
# Count the number of times the character $ch occurs in $str
# Return 0 if the count matches the expected number, 1 otherwise
local str="$1"
local ch="$2"
local expected="$3"
local count=0
while [ "${str#*$ch}" != "${str}" ]; do
str="${str#*$ch}"
count=$(( $count + 1 ))
done
[ $count -eq $expected ]
}
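# Illustrative usage (added example):
#   check_occurances "aes-cbc-essiv:sha256" ":" 1   # returns 0 (exactly one ':')
#   check_occurances "a:b:c" ":" 1                  # returns 1 (two ':' found)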
incol2() {
local dummy check;
local file="$1";
local str="$2";
[ -z "$file" ] && return 1;
[ -z "$str" ] && return 1;
while read dummy check restofline; do
[ "$check" = "$str" ] && return 0
done < $file
return 1
}
udevsettle() {
[ -z "$UDEVVERSION" ] && UDEVVERSION=$(udevadm --version)
if [ $UDEVVERSION -ge 143 ]; then
udevadm settle --exit-if-exists=$hookdir/initqueue/work $settle_exit_if_exists
else
udevadm settle --timeout=30
fi
}
udevproperty() {
[ -z "$UDEVVERSION" ] && UDEVVERSION=$(udevadm --version)
if [ $UDEVVERSION -ge 143 ]; then
for i in "$@"; do udevadm control --property=$i; done
else
for i in "$@"; do udevadm control --env=$i; done
fi
}
ismounted() {
while read a m a; do
[ "$m" = "$1" ] && return 0
done < /proc/mounts
return 1
}
wait_for_if_up() {
local cnt=0
while [ $cnt -lt 200 ]; do
li=$(ip link show $1)
[ -z "${li##*state UP*}" ] && return 0
sleep 0.1
cnt=$(($cnt+1))
done
return 1
}
# root=nfs:[<server-ip>:]<root-dir>[:<nfs-options>]
# root=nfs4:[<server-ip>:]<root-dir>[:<nfs-options>]
nfsroot_to_var() {
# strip nfs[4]:
local arg="$@:"
nfs="${arg%%:*}"
arg="${arg##$nfs:}"
# check if we have a server
if strstr "$arg" ':/*' ; then
server="${arg%%:/*}"
arg="/${arg##*:/}"
fi
path="${arg%%:*}"
# rest are options
options="${arg##$path}"
# strip leading ":"
options="${options##:}"
# strip ":"
options="${options%%:}"
# Does it really start with '/'?
[ -n "${path%%/*}" ] && path="error";
#Fix kernel legacy style separating path and options with ','
if [ "$path" != "${path#*,}" ] ; then
options=${path#*,}
path=${path%%,*}
fi
}
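# Illustrative example (added; values are placeholders) of the variables set by
# nfsroot_to_var for "nfs:192.168.1.1:/exports/root:vers=3":
#   nfs=nfs  server=192.168.1.1  path=/exports/root  options=vers=3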
ip_to_var() {
local v=${1}:
local i
set --
while [ -n "$v" ]; do
if [ "${v#\[*:*:*\]:}" != "$v" ]; then
# handle IPv6 address
i="${v%%\]:*}"
i="${i##\[}"
set -- "$@" "$i"
v=${v#\[$i\]:}
else
set -- "$@" "${v%%:*}"
v=${v#*:}
fi
done
unset ip srv gw mask hostname dev autoconf
case $# in
0) autoconf="error" ;;
1) autoconf=$1 ;;
2) dev=$1; autoconf=$2 ;;
*) ip=$1; srv=$2; gw=$3; mask=$4; hostname=$5; dev=$6; autoconf=$7 ;;
esac
}
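# Illustrative example (added; values are placeholders) of the variables set by
# ip_to_var for "192.168.1.10:192.168.1.1:192.168.1.254:255.255.255.0:client:eth0:none":
#   ip=192.168.1.10 srv=192.168.1.1 gw=192.168.1.254 mask=255.255.255.0
#   hostname=client dev=eth0 autoconf=none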
# Create udev rule match for a device with its device name, or the udev property
# ID_FS_UUID or ID_FS_LABEL
#
# example:
# udevmatch LABEL=boot
# prints:
# ENV{ID_FS_LABEL}="boot"
#
# TODO: symlinks
udevmatch() {
case "$1" in
UUID=????????-????-????-????-????????????|LABEL=*)
printf 'ENV{ID_FS_%s}=="%s"' "${1%%=*}" "${1#*=}"
;;
UUID=*)
printf 'ENV{ID_FS_UUID}=="%s*"' "${1#*=}"
;;
/dev/?*) printf 'KERNEL=="%s"' "${1#/dev/}" ;;
*) return 255 ;;
esac
}
# Prints a unique path for a potential file inside the specified directory. The
# path consists of the directory, the prefix, and a trailing number that is
# incremented until a non-existing file is found.
#
# funiq dir prefix
#
# example:
# # ls /mnt
# cdrom0 cdrom1
#
# # funiq /mnt cdrom
# /mnt/cdrom2
funiq() {
local dir="$1"; local prefix="$2"
local i=0
[ -d "${dir}" ] || return 1
while [ -e "${dir}/${prefix}$i" ]; do
i=$(($i+1)) || return 1
done
echo "${dir}/${prefix}$i"
}
# Creates a unique directory and prints its path. It uses funiq to generate the
# path.
#
# mkuniqdir subdir new_dir_name
mkuniqdir() {
local dir="$1"; local prefix="$2"
local retdir; local retdir_new
[ -d "${dir}" ] || mkdir -m 0755 -p "${dir}" || return 1
retdir=$(funiq "${dir}" "${prefix}") || return 1
until mkdir -m 0755 "${retdir}" 2>/dev/null; do
retdir_new=$(funiq "${dir}" "${prefix}") || return 1
[ "$retdir_new" = "$retdir" ] && return 1
retdir="$retdir_new"
done
echo "${retdir}"
}
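# Illustrative usage (added example):
#   mountpoint=$(mkuniqdir /mnt cdrom)   # e.g. prints /mnt/cdrom0, /mnt/cdrom1, ...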
# Evaluates command for UUIDs either given as arguments for this function or all
# listed in /dev/disk/by-uuid. UUIDs don't have to be fully specified. If only a
# beginning is given, it is expanded to all matching UUIDs. To pass the full UUID
# to your command use '$___' as a placeholder. Remember to escape '$'!
#
# foreach_uuid_until [ -p prefix ] command UUIDs
#
# prefix - string to put just before $___
# command - command to be evaluated
# UUIDs - list of UUIDs separated by space
#
# The function returns with status 0 after the *first successful evaluation* of
# the given command. If evaluation fails for every UUID, the function returns
# with status 1.
#
# Example:
# foreach_uuid_until "mount -U \$___ /mnt; echo OK; umount /mnt" \
# "01234 f512 a235567f-12a3-c123-a1b1-01234567abcb"
foreach_uuid_until() (
cd /dev/disk/by-uuid
[ "$1" = -p ] && local prefix="$2" && shift 2
local cmd="$1"; shift; local uuids_list="$*"
local uuid; local full_uuid; local ___
[ -n "${cmd}" ] || return 1
for uuid in ${uuids_list:-*}; do
for full_uuid in ${uuid}*; do
[ -e "${full_uuid}" ] || continue
___="${prefix}${full_uuid}"
eval ${cmd} && return 0
done
done
return 1
)
# Get kernel name for given device. Device may be the name too (then the same
# is returned), a symlink (full path), UUID (prefixed with "UUID=") or label
# (prefixed with "LABEL="). If just a beginning of the UUID is specified or
# even an empty, function prints all device names which UUIDs match - every in
# single line.
#
# NOTICE: The name starts with "/dev/".
#
# Example:
# devnames UUID=123
# May print:
# /dev/dm-1
# /dev/sdb1
# /dev/sdf3
devnames() {
local dev="$1"; local d; local names
case "$dev" in
UUID=*)
dev="$(foreach_uuid_until '! blkid -U $___' "${dev#UUID=}")" \
&& return 255
[ -z "$dev" ] && return 255
;;
LABEL=*) dev="$(blkid -L "${dev#LABEL=}")" || return 255 ;;
/dev/?*) ;;
*) return 255 ;;
esac
for d in $dev; do
names="$names
$(readlink -e -q "$d")" || return 255
done
echo "${names#
}"
}
usable_root() {
local _d
[ -d $1 ] || return 1
for _d in proc sys dev; do
[ -e "$1"/$_d ] || return 1
done
return 0
}
wait_for_mount()
{
local _name
_name="$(str_replace "$1" '/' '\\x2f')"
printf '. /lib/dracut-lib.sh\nismounted "%s"\n' $1 \
>> "$hookdir/initqueue/finished/ismounted-${_name}.sh"
{
printf 'ismounted "%s" || ' $1
printf 'warn "\"%s\" is not mounted"\n' $1
} >> "$hookdir/emergency/90-${_name}.sh"
}
wait_for_dev()
{
local _name
_name="$(str_replace "$1" '/' '\\x2f')"
printf '[ -e "%s" ]\n' $1 \
>> "$hookdir/initqueue/finished/devexists-${_name}.sh"
{
printf '[ -e "%s" ] || ' $1
printf 'warn "\"%s\" does not exist"\n' $1
} >> "$hookdir/emergency/80-${_name}.sh"
}
killproc() {
local _exe="$(command -v $1)"
local _sig=$2
local _i
[ -x "$_exe" ] || return 1
for _i in /proc/[0-9]*; do
[ "$_i" = "/proc/1" ] && continue
if [ -e "$_i"/_exe ] && [ "$_i/_exe" -ef "$_exe" ] ; then
kill $_sig ${_i##*/}
fi
done
}
|
congwang/dracut
|
modules.d/99base/dracut-lib.sh
|
Shell
|
gpl-2.0
| 14,988 |
# Test for qemu on ARM
# Copyright (C) 2015 Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
root=tests/qemu-smbios
output="$(./virt-what --test-root=$root 2>&1)"
expected="qemu"
if [ "$output" != "$expected" ]; then
echo "$0: test failed because output did not match expected"
echo "Expected output was:"
echo "----------------------------------------"
echo "$expected"
echo "----------------------------------------"
echo "But the actual output of the program was:"
echo "----------------------------------------"
echo "$output"
echo "----------------------------------------"
exit 1
fi
|
pexip/os-virt-what
|
tests/test-qemu-smbios.sh
|
Shell
|
gpl-2.0
| 1,282 |
#!/bin/bash
MAINVER="0.0.1"
Extra="-1"
if [ -d "source" ]
then
cd source
git pull
cd ..
else
git clone https://github.com/AndroidTamer/YADD ./source
fi
rm -rf usr
#Get commit hash
cd source
SVER=`git log --pretty=format:'%h' -n 1`
cd ..
VERSION=$MAINVER"-SNAPSHOT-"$SVER$Extra
# Build
cd source
./clean.py --rebuild
cd build
cmake ..
make
cd ..
if [ ! -f bin/dumper ]
then
echo "build failed"
exit
fi
cd ..
mkdir -p usr/bin usr/share/applications
cp source/bin/dumper usr/bin/yadd-dumper
chmod 755 usr/bin/yadd-dumper
cat <<EOF > usr/share/applications/yadd-dumper.desktop
#!/usr/bin/env xdg-open
[Desktop Entry]
Version=1.0
Type=Application
Terminal=false
TryExec=/usr/bin/yadd-dumper
Exec=x-terminal-emulator --command "yadd-dumper --help; $SHELL"
Name=YAAD-dumper
Comment=Yet another Android Dex bytecode Disassembler
Icon=terminator
Categories=X-tamer-manualanalysis
EOF
debctrl "yadd-dumper" "$VERSION" "Yet another Android Dex bytecode Disassembler\n YADD is planed to be a complex disassembler for the Android Dex bytecode\n That is, a hybrid tool to support pure binary/signature dumping and to \n provide an interface for reversing analysis" "https://github.com/AndroidTamer/YADD" "amd64" "python, libc6"
changelog "Initial release for Android Tamer"
build_package usr
|
AndroidTamer/Packaging_Tools
|
Build/YADD/build.sh
|
Shell
|
gpl-2.0
| 1,291 |
CROSS_TOOL_CHAIN=/home/wonfee/environment/android-kitkat/prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.6/bin
export PATH=$PATH:$CROSS_TOOL_CHAIN
export CROSS_COMPILE=arm-linux-androideabi-
export ARCH=arm
if [ "$1" ] && [ "$1" == "distclean" ]
then
make distclean
else
make hisi_k3v2oem1_defconfig
make zImage
fi
|
Wonfee/huawei_u9508_kernel
|
mk.sh
|
Shell
|
gpl-2.0
| 332 |
#!/usr/bin/env bash
set -e
PACKAGES="cmake pkg-config fftw libogg libvorbis libsndfile libsamplerate jack sdl stk portaudio node fltk"
if [ "$QT5" ]; then
PACKAGES="$PACKAGES [email protected]"
else
PACKAGES="$PACKAGES cartr/qt4/qt@4"
fi
if "${TRAVIS}"; then
PACKAGES="$PACKAGES ccache"
fi
# removing already installed packages from the list
for p in $(brew list); do
PACKAGES=${PACKAGES//$p/}
done;
# shellcheck disable=SC2086
brew install $PACKAGES
# Recompile fluid-synth without CoreAudio per issues #649
# Changes to fluid-synth.rb must be pushed to URL prior to use
if [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then
slug=$TRAVIS_PULL_REQUEST_SLUG
branch=$TRAVIS_PULL_REQUEST_BRANCH
elif "${TRAVIS}"; then
slug=$TRAVIS_REPO_SLUG
branch=$TRAVIS_BRANCH
else
slug="LMMS/lmms"
branch=$(git symbolic-ref --short HEAD)
fi
brew install --build-from-source "https://raw.githubusercontent.com/${slug}/${branch}/cmake/apple/fluid-synth.rb"
sudo npm install -g appdmg
|
mamins1376/lmms
|
.travis/osx..install.sh
|
Shell
|
gpl-2.0
| 974 |
#!/bin/bash
make VARIANT_DEFCONFIG=msm8916_sec_j53g_eur_defconfig msm8916_sec_h_defconfig SELINUX_DEFCONFIG=selinux_defconfig
make -j9
tools/dtbTool -s 2048 -o arch/arm/boot/dt.img -p scripts/dtc/ arch/arm/boot/dts/ -v
|
DJSteve/kernel_j5nlte_mm
|
build_kernel-h.sh
|
Shell
|
gpl-2.0
| 221 |
#!/bin/sh
# llcolor.sh
T='RGB'
fgs_a=(' m' ' 1m' ' 30m' '1;30m' ' 31m' '1;31m' ' 32m' '1;32m' ' 33m' '1;33m' ' 34m' '1;34m' ' 35m' '1;35m' ' 36m' '1;36m' ' 37m' '1;37m')
echo
echo -e "01234567012345670123456701234567012345670123456701234567012345670123456701234567"
echo -e "--------------------------------------------------------------------------------"
echo "fg \ bg | no-bg | 40m | 41m | 42m | 43m | 44m | 45m | 46m | 47m |"
echo -e "--------+-------+-------+-------+-------+-------+-------+-------+-------+-------"
for fgs in "${fgs_a[@]}"
do
fg=${fgs// /}
echo -en " $fgs |\033[$fg $T\033[0m "
for bg in 40m 41m 42m 43m 44m 45m 46m 47m
do
echo -en " \033[$fg\033[$bg $T \033[0m";
done
echo
done
echo
_ansi() { read FB E <<<"${1/,/ }"; echo -e "${FB:-_} ${E:-_}: \e[${FB}m${*:2}\e[${E}m"; }
ansi() { _ansi '' "$@"; }
ans0() { _ansi 0 "$@"; }
bold() { _ansi 1 "$@"; }
dimn() { _ansi 2 "$@"; }
italic() { _ansi 3 "$@"; }
underline() { _ansi 4 "$@"; }
strikethr() { _ansi 9 "$@"; }
red() { _ansi 31 "$@"; }
green() { _ansi 32 "$@"; }
yellow() { _ansi 33 "$@"; }
blue() { _ansi 34 "$@"; }
hred() { _ansi "1;31" "$@"; }
hgreen() { _ansi "1;32" "$@"; }
hyellow() { _ansi "1;33" "$@"; }
hblue() { _ansi "1;34" "$@"; }
ansi ansi {a..z} {1..9}
ans0 ans0 {a..z} {1..9}
bold bold {a..z} {1..9}
dimn dimn {a..z} {1..9}
italic italic {a..z} {1..9}
underline underline {a..z} {1..9}
strikethr strikethr {a..z} {1..9}
red red {a..z} {1..9}
green green {a..z} {1..9}
yellow yellow {a..z} {1..9}
blue blue {a..z} {1..9}
hred hred {a..z} {1..9}
hgreen hgreen {a..z} {1..9}
hyellow hyellow {a..z} {1..9}
hblue hblue {a..z} {1..9}
|
tcler/bkr-client-improved
|
utils/lscolor.sh
|
Shell
|
gpl-2.0
| 1,811 |
#!/system/bin/sh
# *****************************
# i9300 Samsung 4.4.4 version
#
# V0.1
# *****************************
# define basic kernel configuration
# path to internal sd memory
SD_PATH="/data/media/0"
# block devices
SYSTEM_DEVICE="/dev/block/mmcblk0p9"
CACHE_DEVICE="/dev/block/mmcblk0p8"
DATA_DEVICE="/dev/block/mmcblk0p12"
# define file paths
BOEFFLA_DATA_PATH="$SD_PATH/boeffla-kernel-data"
BOEFFLA_LOGFILE="$BOEFFLA_DATA_PATH/boeffla-kernel.log"
BOEFFLA_STARTCONFIG="/data/.boeffla/startconfig"
BOEFFLA_STARTCONFIG_DONE="/data/.boeffla/startconfig_done"
CWM_RESET_ZIP="boeffla-config-reset-v3.1.zip"
INITD_ENABLER="/data/.boeffla/enable-initd"
BUSYBOX_ENABLER="/data/.boeffla/enable-busybox"
FRANDOM_ENABLER="/data/.boeffla/enable-frandom"
# If not yet existing, create a boeffla-kernel-data folder on sdcard
# which is used for many purposes,
# always set permissions and owners correctly for paths and files
if [ ! -d "$BOEFFLA_DATA_PATH" ] ; then
/sbin/busybox mkdir $BOEFFLA_DATA_PATH
fi
/sbin/busybox chmod 775 $SD_PATH
/sbin/busybox chown 1023:1023 $SD_PATH
/sbin/busybox chmod -R 775 $BOEFFLA_DATA_PATH
/sbin/busybox chown -R 1023:1023 $BOEFFLA_DATA_PATH
# maintain log file history
rm $BOEFFLA_LOGFILE.3
mv $BOEFFLA_LOGFILE.2 $BOEFFLA_LOGFILE.3
mv $BOEFFLA_LOGFILE.1 $BOEFFLA_LOGFILE.2
mv $BOEFFLA_LOGFILE $BOEFFLA_LOGFILE.1
# Initialize the log file (chmod to make it readable also via /sdcard link)
echo $(date) Boeffla-Kernel initialisation started > $BOEFFLA_LOGFILE
/sbin/busybox chmod 666 $BOEFFLA_LOGFILE
/sbin/busybox cat /proc/version >> $BOEFFLA_LOGFILE
echo "=========================" >> $BOEFFLA_LOGFILE
/sbin/busybox grep ro.build.version /system/build.prop >> $BOEFFLA_LOGFILE
echo "=========================" >> $BOEFFLA_LOGFILE
# Activate frandom entropy generator if configured
if [ -f $FRANDOM_ENABLER ]; then
echo $(date) "Frandom entropy generator activation requested" >> $BOEFFLA_LOGFILE
/sbin/busybox insmod /lib/modules/frandom.ko
/sbin/busybox insmod /system/lib/modules/frandom.ko
if [ ! -e /dev/urandom.ORIG ] && [ ! -e /dev/urandom.orig ] && [ ! -e /dev/urandom.ori ]; then
/sbin/busybox touch /dev/urandom.MOD
/sbin/busybox touch /dev/random.MOD
/sbin/busybox mv /dev/urandom /dev/urandom.ORIG
/sbin/busybox ln /dev/erandom /dev/urandom
/sbin/busybox busybox chmod 644 /dev/urandom
/sbin/busybox mv /dev/random /dev/random.ORIG
/sbin/busybox ln /dev/erandom /dev/random
/sbin/busybox busybox chmod 644 /dev/random
/sbin/busybox sleep 0.5s
/sbin/busybox sync
echo $(date) "Frandom entropy generator activated" >> $BOEFFLA_LOGFILE
fi
fi
# Install busybox applet symlinks to /system/xbin if enabled,
# otherwise only install mount/umount/top symlinks
mount -o remount,rw -t ext4 $SYSTEM_DEVICE /system
if [ -f $BUSYBOX_ENABLER ]; then
/sbin/busybox --install -s /system/xbin
echo $(date) "Busybox applet symlinks installed to /system/xbin" >> $BOEFFLA_LOGFILE
else
/sbin/busybox ln -s /sbin/busybox /system/xbin/mount
/sbin/busybox ln -s /sbin/busybox /system/xbin/umount
/sbin/busybox ln -s /sbin/busybox /system/xbin/top
echo $(date) "Mount/umount/top applet symlinks installed to /system/xbin" >> $BOEFFLA_LOGFILE
fi
/sbin/busybox sync
mount -o remount,ro -t ext4 $SYSTEM_DEVICE /system
# Correct /sbin and /res directory and file permissions
mount -o remount,rw rootfs /
# change permissions of /sbin folder and scripts in /res/bc
/sbin/busybox chmod -R 755 /sbin
/sbin/busybox chmod 755 /res/bc/*
/sbin/busybox sync
mount -o remount,ro rootfs /
# remove any obsolete Boeffla-Config V2 startconfig done file
/sbin/busybox rm -f $BOEFFLA_STARTCONFIG_DONE
# Custom boot animation support
# Implementation 1
#if [ -f /data/local/bootanimation.zip ] || [ -f /system/media/bootanimation.zip ]; then
# echo $(date) Playing custom boot animation >> $BOEFFLA_LOGFILE
# /system/bin/bootanimation &
#else
# echo $(date) Playing Samsung stock boot animation >> $BOEFFLA_LOGFILE
# /system/bin/samsungani &
#fi
# Implementation 2
if [ -f /data/local/bootanimation.zip ] || [ -f /system/media/bootanimation.zip ]; then
echo $(date) Playing custom boot animation >> $BOEFFLA_LOGFILE
/sbin/bootanimation &
else
echo $(date) Playing Samsung stock boot animation >> $BOEFFLA_LOGFILE
/system/bin/bootanimation &
fi
# boeffla sound change delay (only for Samsung Kernels)
echo "200000" > /sys/class/misc/boeffla_sound/change_delay
echo $(date) Boeffla-Sound change delay set to 200 ms >> $BOEFFLA_LOGFILE
# Apply Boeffla-Kernel default settings
# Set AC charging rate default
echo "1100" > /sys/kernel/charge_levels/charge_level_ac
# Ext4 tweaks default to on
/sbin/busybox sync
mount -o remount,commit=20,noatime $CACHE_DEVICE /cache
/sbin/busybox sync
mount -o remount,commit=20,noatime $DATA_DEVICE /data
/sbin/busybox sync
# Sdcard buffer tweaks default to 256 kb
echo 256 > /sys/block/mmcblk0/bdi/read_ahead_kb
echo 256 > /sys/block/mmcblk1/bdi/read_ahead_kb
echo $(date) Boeffla-Kernel default settings applied >> $BOEFFLA_LOGFILE
# init.d support (enabler only to be considered for CM based roms)
# (zipalign scripts are the only exception and will not be executed)
#if [ -f $INITD_ENABLER ] ; then
echo $(date) Execute init.d scripts start >> $BOEFFLA_LOGFILE
if cd /system/etc/init.d >/dev/null 2>&1 ; then
for file in * ; do
if ! cat "$file" >/dev/null 2>&1 ; then continue ; fi
if [[ "$file" == *zipalign* ]]; then continue ; fi
echo $(date) init.d file $file started >> $BOEFFLA_LOGFILE
/system/bin/sh "$file"
echo $(date) init.d file $file executed >> $BOEFFLA_LOGFILE
done
fi
echo $(date) Finished executing init.d scripts >> $BOEFFLA_LOGFILE
#else
# echo $(date) init.d script handling by kernel disabled >> $BOEFFLA_LOGFILE
#fi
# Now wait for the rom to finish booting up
# (by checking for the android systemui process)
echo $(date) Checking for Rom boot trigger... >> $BOEFFLA_LOGFILE
while ! /sbin/busybox pgrep com.android.systemui ; do
/sbin/busybox sleep 1
done
echo $(date) Rom boot trigger detected, waiting a few more seconds... >> $BOEFFLA_LOGFILE
/sbin/busybox sleep 10
# Play sound for Boeffla-Sound compatibility
echo $(date) Initialize sound system... >> $BOEFFLA_LOGFILE
/sbin/tinyplay /res/misc/silence.wav -D 0 -d 0 -p 880
# Deactivate Samsung standard zRam implementation, if any
busybox swapoff /dev/block/zram0
echo "1" > /sys/block/zram0/reset
echo "0" > /sys/block/zram0/disksize
echo $(date) Samsung standard zRam deactivated >> $BOEFFLA_LOGFILE
# Interaction with Boeffla-Config app V2
# save original stock values for selected parameters
cat /sys/devices/system/cpu/cpu0/cpufreq/UV_mV_table > /dev/bk_orig_cpu_voltage
cat /sys/class/misc/gpu_clock_control/gpu_control > /dev/bk_orig_gpu_clock
cat /sys/class/misc/gpu_voltage_control/gpu_control > /dev/bk_orig_gpu_voltage
cat /sys/kernel/charge_levels/charge_level_ac > /dev/bk_orig_charge_level_ac
cat /sys/kernel/charge_levels/charge_level_usb > /dev/bk_orig_charge_level_usb
cat /sys/kernel/charge_levels/charge_level_wireless > /dev/bk_orig_charge_level_wireless
cat /sys/module/lowmemorykiller/parameters/minfree > /dev/bk_orig_minfree
/sbin/busybox lsmod > /dev/bk_orig_modules
# if there is a startconfig placed by Boeffla-Config V2 app, execute it;
if [ -f $BOEFFLA_STARTCONFIG ]; then
echo $(date) "Startup configuration found:" >> $BOEFFLA_LOGFILE
cat $BOEFFLA_STARTCONFIG >> $BOEFFLA_LOGFILE
. $BOEFFLA_STARTCONFIG
echo $(date) Startup configuration applied >> $BOEFFLA_LOGFILE
else
echo $(date) "No startup configuration found" >> $BOEFFLA_LOGFILE
# If not, apply default Boeffla-Kernel zRam
	# Enable a total of 600 MB zRam across 4 zram devices by default
busybox swapoff /dev/block/zram0
busybox swapoff /dev/block/zram1
busybox swapoff /dev/block/zram2
busybox swapoff /dev/block/zram3
echo "1" > /sys/block/zram0/reset
echo "1" > /sys/block/zram1/reset
echo "1" > /sys/block/zram2/reset
echo "1" > /sys/block/zram3/reset
echo "157286400" > /sys/block/zram0/disksize
echo "157286400" > /sys/block/zram1/disksize
echo "157286400" > /sys/block/zram2/disksize
echo "157286400" > /sys/block/zram3/disksize
busybox mkswap /dev/block/zram0
busybox mkswap /dev/block/zram1
busybox mkswap /dev/block/zram2
busybox mkswap /dev/block/zram3
busybox swapon -p 2 /dev/block/zram0
busybox swapon -p 2 /dev/block/zram1
busybox swapon -p 2 /dev/block/zram2
busybox swapon -p 2 /dev/block/zram3
busybox sleep 0.5s
busybox sync
echo "80" > /proc/sys/vm/swappiness
echo $(date) Boeffla default zRam activated >> $BOEFFLA_LOGFILE
fi
# Turn off debugging for certain modules
echo 0 > /sys/module/ump/parameters/ump_debug_level
echo 0 > /sys/module/mali/parameters/mali_debug_level
echo 0 > /sys/module/kernel/parameters/initcall_debug
echo 0 > /sys/module/lowmemorykiller/parameters/debug_level
echo 0 > /sys/module/earlysuspend/parameters/debug_mask
echo 0 > /sys/module/alarm/parameters/debug_mask
echo 0 > /sys/module/alarm_dev/parameters/debug_mask
echo 0 > /sys/module/binder/parameters/debug_mask
echo 0 > /sys/module/xt_qtaguid/parameters/debug_mask
# Auto root support
if [ -f $SD_PATH/autoroot ]; then
echo $(date) Auto root is enabled >> $BOEFFLA_LOGFILE
mount -o remount,rw -t ext4 $SYSTEM_DEVICE /system
/sbin/busybox mkdir /system/bin/.ext
/sbin/busybox cp /res/misc/su /system/xbin/su
/sbin/busybox cp /res/misc/su /system/xbin/daemonsu
/sbin/busybox cp /res/misc/su /system/bin/.ext/.su
/sbin/busybox cp /res/misc/install-recovery.sh /system/etc/install-recovery.sh
/sbin/busybox echo /system/etc/.installed_su_daemon
/sbin/busybox chown 0.0 /system/bin/.ext
/sbin/busybox chmod 0777 /system/bin/.ext
/sbin/busybox chown 0.0 /system/xbin/su
/sbin/busybox chmod 6755 /system/xbin/su
/sbin/busybox chown 0.0 /system/xbin/daemonsu
/sbin/busybox chmod 6755 /system/xbin/daemonsu
/sbin/busybox chown 0.0 /system/bin/.ext/.su
/sbin/busybox chmod 6755 /system/bin/.ext/.su
/sbin/busybox chown 0.0 /system/etc/install-recovery.sh
/sbin/busybox chmod 0755 /system/etc/install-recovery.sh
/sbin/busybox chown 0.0 /system/etc/.installed_su_daemon
/sbin/busybox chmod 0644 /system/etc/.installed_su_daemon
/system/bin/sh /system/etc/install-recovery.sh
/sbin/busybox sync
mount -o remount,ro -t ext4 $SYSTEM_DEVICE /system
echo $(date) Auto root: su installed >> $BOEFFLA_LOGFILE
rm $SD_PATH/autoroot
fi
# EFS backup
EFS_BACKUP_INT="$BOEFFLA_DATA_PATH/efs.tar.gz"
EFS_BACKUP_EXT="/storage/extSdCard/efs.tar.gz"
if [ ! -f $EFS_BACKUP_INT ]; then
cd /efs
/sbin/busybox tar cvz -f $EFS_BACKUP_INT .
/sbin/busybox chmod 666 $EFS_BACKUP_INT
/sbin/busybox cp $EFS_BACKUP_INT $EFS_BACKUP_EXT
echo $(date) EFS Backup: Not found, now created one >> $BOEFFLA_LOGFILE
fi
# Copy reset cwm zip in boeffla-kernel-data folder
CWM_RESET_ZIP_SOURCE="/res/misc/$CWM_RESET_ZIP"
CWM_RESET_ZIP_TARGET="$BOEFFLA_DATA_PATH/$CWM_RESET_ZIP"
if [ ! -f $CWM_RESET_ZIP_TARGET ]; then
/sbin/busybox cp $CWM_RESET_ZIP_SOURCE $CWM_RESET_ZIP_TARGET
/sbin/busybox chmod 666 $CWM_RESET_ZIP_TARGET
echo $(date) CWM reset zip copied >> $BOEFFLA_LOGFILE
fi
# Disable knox
# pm disable com.sec.knox.seandroid
# Finished
echo $(date) Boeffla-Kernel initialisation completed >> $BOEFFLA_LOGFILE
|
ea4862/boeffla
|
ramdisk_boeffla/fs/sbin/boeffla-init.sh
|
Shell
|
gpl-2.0
| 11,475 |
#!/bin/bash
# Check which machines were wrongly configured on production instances on production servers
#
# 2015-11-21 - by s3cur3n3t
#
##########################################################################################################
############
# MAIN #
############
# Should be used with source file check_mk.conf
. default/check_mk.conf
# Call functions
get_ips
send_mail_ips
clean_tmp
|
s3cur3n3t/DevOps
|
devoops/check_ip.sh
|
Shell
|
gpl-2.0
| 405 |
#!/bin/bash
# Global variables:
# ${GV_LOG}: Prefix echoed strings with this variable to have them logged.
SCRIPT_NAME="$(basename "$(test -L "$0" && readlink "$0" || echo "$0")")"
echo "${GV_LOG}>>>>>>>>> Running ${SCRIPT_NAME} ..."
# Install calcoo
apt-get -y --force-yes install calcoo
# Log
echo "${GV_LOG} * Install calculator: calcoo."
# REJECTION: calcoo is ugly
# Note: Gnome has 52528K overhead.
#
# * Running inst-std-accessories-calc-calcoo.sh ...
# * After this operation, 123 kB of additional disk space will be used.
# * Install calculator: calcoo.
# * Disk size = 2715708K. Space Used = 220K.
# * Running inst-std-accessories-calc-gnome-calculator.sh ...
# * After this operation, 66.4 MB of additional disk space will be used.
# * Install calculator: gnome-calculator.
# * Disk size = 2777428K. Space Used = 61720K.
# * Running inst-std-accessories-calc-qalculate-gtk.sh ...
# * After this operation, 8,248 kB of additional disk space will be used.
# * Install calculator: qalculate-gtk.
# * Disk size = 2788156K. Space Used = 10728K.
# * Running inst-std-accessories-calc-speedcrunch.sh ...
# * After this operation, 1,830 kB of additional disk space will be used.
# * Install calculator: speedcrunch.
# * Disk size = 2791292K. Space Used = 3136K.
# * Running inst-std-accessories-calc-wcalc.sh ...
# * After this operation, 1,170 kB of additional disk space will be used.
# * Install calculator: wcalc.
# * Disk size = 2793116K. Space Used = 1824K.
# * Running inst-std-accessories-calc-x11-apps.sh ...
# * After this operation, 20.8 MB of additional disk space will be used.
# * Install calculator: xcalc.
# * Disk size = 2819800K. Space Used = 26684K.
# * Running inst-std-accessories-galculator.sh ...
# * After this operation, 1,424 kB of additional disk space will be used.
# * Install Calculator: galculator.
# * Insert galculator in Accessories menu.
# * Disk size = 2825140K. Space Used = 1520K.
|
limelime/cust-live-deb
|
scripts-rejected/inst-std-accessories-calc-calcoo/inst-std-accessories-calc-calcoo.sh
|
Shell
|
gpl-2.0
| 2,024 |
#!/bin/bash
# Example for running
docker run -e HOME=/root -t -i imiell/sd_gdk_pixbuf /bin/bash
|
ianmiell/shutit-distro
|
gdk_pixbuf/bin/run.sh
|
Shell
|
gpl-2.0
| 96 |
#!/bin/bash
lessc tapasG.less > tapasG.css
lessc tapasGdiplo.less > tapasGdiplo.css
lessc tapasGnormal.less > tapasGnormal.css
#lessc sleepy.less > sleepy.css
#lessc terminal.less > terminal.css
|
NEU-DSG-archive/tapas
|
sites/all/modules/tapas_transform/transforms/tapas/css/makecss.bash
|
Shell
|
gpl-2.0
| 205 |
#!/usr/bin/env bats
# Created by Shahriyar Rzayev from Percona
WORKDIR="${PWD}"
DIRNAME="$BATS_TEST_DIRNAME"
DIRNAME=$(dirname "$0")
# Preparing test env
function clone_and_build() {
git clone --recursive --depth=1 https://github.com/percona/percona-server.git -b 5.7 PS-5.7-trunk
cd $1/PS-5.7-trunk
# from percona-qa repo
~/percona-qa/build_5.x_debug.sh
}
function run_startup() {
cd $1
# from percona-qa repo
~/percona-qa/startup.sh
}
function start_server() {
cd $1
./start
}
function execute_sql() {
# General function to pass sql statement to mysql client
conn_string=$(cat $1/cl)
${conn_string} -e "$2"
}
function run_generated_columns_test() {
# Calling bats file
if [[ $tap == 1 ]] ; then
bats --tap $DIRNAME/generated_columns.bats
else
bats $DIRNAME/generated_columns.bats
fi
}
function run_json_test() {
# Calling bats file
if [[ $tap == 1 ]] ; then
bats --tap $DIRNAME/json.bats
else
bats $DIRNAME/json.bats
fi
}
function install_mysql_connector() {
# Downloading rpm package for CentOS 7
# For now installing it globally
# TODO: Install this package inside Python virtualenv to not affect whole system globally...
IF_INSTALLED=$(rpm -qa | grep mysql-connector-python-8.0)
  if [ -z "$IF_INSTALLED" ] ; then
wget https://dev.mysql.com/get/Downloads/Connector-Python/mysql-connector-python-8.0.4-0.1.dmr.el7.x86_64.rpm
sudo yum install mysql-connector-python-8.0.4-0.1.dmr.el7.x86_64.rpm
else
echo "Already Installed"
fi
}
function run_mysqlx_plugin_test() {
python $DIRNAME/myrocks_mysqlx_plugin.py
}
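# Note: the $clone and $tap flags used below are assumed to be exported by the caller,
# e.g. (hypothetical invocation): clone=1 tap=1 ./myrocks-testsuite.sh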
# Run clone and build here
if [[ $clone == 1 ]] ; then
echo "Clone and Build server from repo"
clone_and_build ${WORKDIR}
else
echo "Skipping Clone and Build"
fi
# Get BASEDIR here
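# Pick the newest PS* build directory, excluding tarballs; assumes a single match.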
BASEDIR=$(ls -1td ${WORKDIR}/PS* | grep -v ".tar" | grep PS[0-9])
# Run startup.sh here
echo "Running startup.sh from percona-qa"
run_startup ${BASEDIR}
# Start server here
echo "Starting Server!"
start_server ${BASEDIR}
# Create sample database here
echo "Creating sample database"
DB="create database generated_columns_test"
execute_sql ${BASEDIR} "${DB}"
# Create sample table here
echo "Creating sample table"
TABLE="CREATE TABLE generated_columns_test.sbtest1 (
id int(11) NOT NULL AUTO_INCREMENT,
k int(11) NOT NULL DEFAULT '0',
c char(120) NOT NULL DEFAULT '',
pad char(60) NOT NULL DEFAULT '',
PRIMARY KEY (id)
) ENGINE=InnoDB"
execute_sql ${BASEDIR} "${TABLE}"
# Altering table engine to MyRocks here
echo "Altering table engine"
ALTER="alter table generated_columns_test.sbtest1 engine=rocksdb"
execute_sql ${BASEDIR} "${ALTER}"
# Calling generated_columns.bats file here
echo "Running generated_columns.bats"
run_generated_columns_test
# Calling json.bats file here
echo "Running json.bats"
run_json_test
# Installing mysql-connector-python
echo "Installing mysql-connector-python"
install_mysql_connector
# Installing mysqlx plugin
echo "Installing mysqlx plugin"
MYSQLX="INSTALL PLUGIN mysqlx SONAME 'mysqlx.so'"
execute_sql ${BASEDIR} "${MYSQLX}"
# Creating user for X Plugin tests
echo "Creating sample user"
USER="create user bakux@localhost identified by 'Baku12345'"
execute_sql ${BASEDIR} "${USER}"
# Giving "all" grants for new user
echo "Granting sample user"
GRANT="grant all on *.* to bakux@localhost"
execute_sql ${BASEDIR} "${GRANT}"
# Calling myrocks_mysqlx_plugin.py file here
echo "Running X Plugin test"
run_mysqlx_plugin_test
|
ShahriyarR/percona-qa
|
myrocks-tests/myrocks-testsuite.sh
|
Shell
|
gpl-2.0
| 3,474 |
#!/bin/bash
docker build -t=graphlab --build-arg "[email protected]" --build-arg "USER_KEY=ABCD-0123-EF45-6789-9876-54FE-3210-DCBA" .
|
curtiszimmerman/docker-ipython-graphlab-create
|
build.sh
|
Shell
|
gpl-3.0
| 147 |
#!/bin/bash
[[ -z $SEEDS ]] && SEEDS=`seq 1 10`
[[ -z $CYCLES ]] && CYCLES=100
RHO=0.98
SEEDS=($SEEDS)
N=${#SEEDS[@]}
printf "value"
for SEED in ${SEEDS[@]}
do
printf ",$SEED"
done
printf ",mean,sd\n"
getkey() {
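# Run the simulator once per seed for the metric named in "$1", print each value,
# then append the mean and the sample standard deviation (N-1 denominator).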
KEY="$1"
printf "$KEY"
VALS=()
ACC=0
for SEED in ${SEEDS[@]}
do
VAL=$(bin/simulator -f data/calvin.dat -r $RHO -s $SEED -c $CYCLES | grep "$KEY:" | sed 's/[^:]*:\s*//')
ACC=$(echo "$ACC + $VAL" | bc -l)
VALS=("${VALS[@]}" "$VAL")
printf ",$VAL"
done
MEAN=$(echo "$ACC / $N" | bc -l)
SD=0
for VAL in ${VALS[@]}
do
SD=$(echo "$SD + ($VAL-$MEAN)*($VAL-$MEAN)" | bc -l)
done
SD=$(echo "sqrt ( $SD / ($N - 1))" | bc -l)
printf ",$MEAN,$SD\n"
}
getkey "rho"
getkey "L"
getkey "mean Y"
getkey "mean A"
getkey "W/W0"
|
cabul/smde-qt
|
utils/table.sh
|
Shell
|
gpl-3.0
| 753 |
#!/bin/bash
# to get rid of MSDOS format do this to this file: sudo sed -i s/\\r//g ./filename
# or, open it in nano, press Ctrl-O and then Alt-M a few times to toggle MS-DOS format off, then save
# updated the list of installed gear so this works if we also use it : https://github.com/DeadSix27/python_cross_compile_script
sudo apt-get install -y build-essential autoconf libtool-bin libtool gettext autopoint gyp gperf autogen bzip2 pandoc
sudo apt-get install -y subversion curl texinfo g++ bison flex cvs yasm automake ed gcc cmake git make pkg-config mercurial unzip pax wget ant
sudo apt-get install -y git-remote-hg libxslt1.1 libxml2 rake docbook-utils docbook-xsl docbook-to-man docbook2x p7zip p7zip-full
sudo apt-get install -y xsltproc docbook-to-man itstool
#sudo apt-get remove -y nasm
sudo apt-get remove -y doxygen
# gendef is installed with mingw
sudo apt-get install -y libmozjs-dev libxmu-dev libgconf2-dev libdbus-1-dev network-manager-dev xserver-xorg-dev # for libproxy
sudo apt-get install -y zlib1g-dev #warning: you may need to install zlib development headers first if you want to build mp4-box on ubuntu
cd ~/Desktop
sudo chmod 777 -R *
#------------------------------------------------------------------------------------------------
# 2017.05.26 x264 has a new dependency on nasm 2.13.1 or greater ...
# before we do anything, build NASM if need be
set -x
if [[ ! -d "nasm-2.13.01" ]]; then
echo "Downloading nasm 2.13.01"
url="https://github.com/hydra3333/ffmpeg-windows-build-helpers-withOpenCL/blob/master/miscellaneous/nasm-2.13.01.tar.xz?raw=true"
rm -f "nasm-2.13.01.tar.xz"
curl -4 -H 'Pragma: no-cache' -H 'Cache-Control: no-cache' -H 'Cache-Control: max-age=0' "$url" --retry 50 -L --output "nasm-2.13.01.tar.xz" --fail # -L follows HTTP redirects
tar -xf "nasm-2.13.01.tar.xz" || unzip "nasm-2.13.01.tar.xz"
echo "Configuring nasm 2.13.01"
cd nasm-2.13.01
./autogen.sh
./configure --prefix=/usr --exec_prefix=/usr --enable-sections --enable-lto
echo "Make nasm 2.13.01"
make
echo "Installing nasm 2.13.01"
sudo make install # sudo so it copies into /usr folder tree
cd ..
echo "Done Building and Installing nasm 2.13.01"
fi
set +x
#read -p "After nasm build, press Enter to continue"
#------------------------------------------------------------------------------------------------
cd ~/Desktop
sudo chmod 777 -R *
mkdir -v "ffmpeg-windows-build-helpers-withOpenCL-master"
cd ffmpeg-windows-build-helpers-withOpenCL-master
pwd
#rm -f ./cross_compile_ffmpeg-rdp-withOpenCL-v5-gcc7.1.0.sh
#curl -4 -H 'Pragma: no-cache' -H 'Cache-Control: no-cache' -H 'Cache-Control: max-age=0' https://raw.githubusercontent.com/hydra3333/ffmpeg-windows-build-helpers-withOpenCL/master/cross_compile_ffmpeg-rdp-withOpenCL-v5-gcc7.1.0.sh -O --fail || exit 1
#sudo chmod 777 -R *
rm -f ./cross_compile_ffmpeg.rdp-mod007.sh
curl -4 -H 'Pragma: no-cache' -H 'Cache-Control: no-cache' -H 'Cache-Control: max-age=0' https://raw.githubusercontent.com/hydra3333/ffmpeg-windows-build-helpers-withOpenCL/master/cross_compile_ffmpeg.rdp-mod007.sh -O --fail || exit 1
sudo chmod 777 -R *
sudo ./cross_compile_ffmpeg.rdp-mod007.sh --cflags='-mtune=generic -O3' --gcc-cpu-count=2 --sandbox-ok=y --build-ffmpeg-shared=n --build-ffmpeg-static=y --disable-nonfree=n --build-x264-with-libav=y --build-intel-qsv=y --build-libmxf=n --build-mp4box=y --build-mplayer=n --build-vlc=n --git-get-latest=y --prefer-stable=n --compiler-flavors=multi --enable-gpl=y --build-lsw=y --enable-opencl=y --high-bitdepth=y --build-aom=y # --build-youtube-dl=y --build-flac=y --build-cuetools=y
exit
|
hydra3333/ffmpeg-windows-build-helpers-withOpenCL
|
archive/rm-v7.sh
|
Shell
|
gpl-3.0
| 3,679 |
#!/usr/bin/env bash
# Start the development server locally. The code is served from the src/ directory.
ROOT=$(cd "$(dirname "$0")"; pwd)
source ${ROOT}/common.sh || exit 1
cd ${ROOT} || error "Could not change to directory $ROOT"
checkDeps gae
log "Starting development server"
python $(get_appengine_path)/dev_appserver.py src/app.yaml || error "Could not start development server locally"
|
yozw/lio-ng
|
devserver.sh
|
Shell
|
gpl-3.0
| 395 |
#!/bin/bash
python /home/pi/scripts/bme280.py | grep Temperature | awk '{print $3}' | cut -c1-4
|
HestiaPi/hestia-touch-openhab
|
home/pi/scripts/getBMEtemp.sh
|
Shell
|
gpl-3.0
| 97 |
#!/usr/bin/env bash
DIR=$(dirname "$0")
if [[ $(git status -s) ]]
then
echo "The working directory is dirty. Please commit any pending changes."
exit 1;
fi
echo "Deleting old publication"
rm -rf public
mkdir public
git worktree prune
rm -rf .git/worktrees/public/
echo "Checking out gh-pages branch into public"
git worktree add -B gh-pages public origin/gh-pages
echo "Removing existing files"
rm -rf public/*
echo "Generating site"
hugo
cp ./CNAME public/CNAME
echo "Updating gh-pages branch"
cd public && git add --all && git commit -m "Publishing to gh-pages (publish.sh)" && cd ..
git push origin gh-pages:gh-pages
|
JointBox/docs
|
publish.sh
|
Shell
|
gpl-3.0
| 645 |
mvn dependency:sources
|
pablodanielrey/phone
|
descargar-sources.sh
|
Shell
|
gpl-3.0
| 24 |
#!/bin/bash
### This script uses atomsk (and some bash scripting)
### to create a screw 1/2 <111> dislocation in iron
### using anisotropic elasticity. In addition a cylinder
### is defined around the dislocation core; atoms inside
### this cylinder are mobile, those outside are fixed.
rm -f Fe_dislo*
### Define a few variables:
### Lattice constant (A)
alat=2.87
### Dimensions of unit cell along X and Y
### This is for crystal orientation X=[121], Y=[-101], Z=[1-11]
uX=$(echo "$alat*sqrt(6.0)" | bc -l)
uY=$(echo "$alat*sqrt(2.0)" | bc -l)
### Length of Burgers vector 1/2 [1-11]
b=$(echo "$alat*0.5*sqrt(3.0)" | bc -l)
### Number of unit cells along X, Y, Z
### This can be changed to make the system bigger or smaller
eX=20
eY=30
eZ=1
### Position of the dislocation (also the center of cylinder)
posX=$(echo "$uX*$eX*0.5"|bc)
posY=$(echo "$uY*(0.5*$eY+0.666666666)"|bc)
### Radius (in A) of cylinder around dislocation core
radius=50.0
### Run atomsk!
### Summary of the command-line parameters:
### Create the unit cell (mode --create) oriented X=[121], Y=[-101], Z=[1-11]
### Expand it to make a supercell (option -expand)
### Read material properties from "Fe_prop.txt" (option -prop)
### (elastic tensor + system orientation)
### Build 1/2 <111> screw dislocation, line along Z (option -disloc)
### (elastic tensor was defined before, so
### anisotropic elasticity will be used)
### Fix atoms at the boundaries (options select and fix)
### Output to XSF, CFG and BOP formats
atomsk --create bcc $alat Fe orient [121] [-101] [1-11] -expand $eX $eY $eZ -prop Fe_prop.txt -disloc $posX $posY screw z y $b 0.0 -select out cylinder z $posX $posY $radius -fix all above -100.0 x Fe_dislo.xsf cfg bop
### Note: dislocation stresses and fixed atoms can be visualized
### with Atomeye: see auxiliary properties in "Fe_dislo.cfg"
|
shigueru/atomsk
|
examples/Fe_disloc_screw111_anisotropy/build_disloc.sh
|
Shell
|
gpl-3.0
| 1,859 |
#!/bin/sh
pkgname=pkg-config
pkgver=0.29.2
vcs=git
gittag=pkg-config-${pkgver}
relmon_id=3649
kiin_make() {
rm -rf glib
mkdir glib
./autogen.sh --prefix=/usr \
--with-internal-glib=no \
--docdir=/usr/share/doc/${pkgname} \
--disable-host-tool
make
}
kiin_install() {
make DESTDIR=${pkgdir} install
}
|
alekseyrybalkin/kiin-repo
|
base/pkg-config/package.sh
|
Shell
|
gpl-3.0
| 347 |
#!/bin/sh
BASE="/media/${USER}/disk/DCIM"
MARKER="${BASE}/.last_imported"
if ! [ -r "$MARKER" ] ; then
echo "Can't find '$MARKER', doing nothing!"
exit 1
fi
TGT="/scratch/pictures/$(date +%Y)/INCOMING"
if ! [ -d "$TGT" ] ; then
echo "Can't find target directory '$TGT', stopping."
exit 2
fi
LAST=$(cat "$MARKER")
cd "$BASE"
for file in $(find . -type f -newer "$LAST" -and -not -iname '.last_imported') ; do
cp -v "$file" "$TGT"
done
echo "$file" > "$MARKER"
|
ehrenfeu/simplify
|
misc/import_photos.sh
|
Shell
|
gpl-3.0
| 479 |
#!/bin/bash
set -e
export CC="gcc"
export CFLAGS="-O2 -flto"
export LDFLAGS="-lm -flto -pthread"
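# Configure a minimal static libwebp: LTO enabled, all optional image I/O
# back-ends, tools and auxiliary libraries disabled.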
./autogen.sh
./configure \
--disable-shared \
--enable-static \
--disable-sse4.1 \
\
--disable-gl \
--disable-sdl \
--disable-png \
--disable-jpeg \
--disable-tiff \
--disable-gif \
--disable-wic \
\
--disable-libwebpmux \
--disable-libwebpdemux \
--disable-libwebpdecoder \
--disable-libwebpextras
make clean
|
TrueCat17/Ren-Engine
|
libs/scripts/conf/libwebp.sh
|
Shell
|
gpl-3.0
| 427 |
cd $ZYNTHIAN_SW_DIR/mod-host
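# Flag the tree as changed when 'git pull' prints anything other than "Already up to date."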
git pull | grep -q -v 'Already up.to.date.' && changed=1
if [[ "$changed" -eq 1 ]]; then
make -j 4
make install
make clean
cd ..
fi
cd $ZYNTHIAN_SW_DIR/mod-ui
git remote remove origin
git remote add origin https://github.com/zynthian/mod-ui.git
git fetch origin zyn-mod-merge
git checkout zyn-mod-merge
git reset --hard origin/zyn-mod-merge
cd utils
make -j 4
if [ ! -d "$ZYNTHIAN_SW_DIR/mod-ui/data" ]; then
mkdir "$ZYNTHIAN_SW_DIR/mod-ui/data"
fi
if [ ! -d "$ZYNTHIAN_SW_DIR/browsepy" ]; then
$ZYNTHIAN_RECIPE_DIR/install_mod-browsepy.sh
fi
|
zynthian/zynthian-sys
|
scripts/recipes.update/update_modui.sh
|
Shell
|
gpl-3.0
| 583 |
#!/bin/sh
set -x # Output executed commands
set -e # Make script fail as soon as one command fails
# Testing main program by sending a bunch of commands via testclient.
# If working correctly the commands should be queued up and performed chronologically.
./testclient 4 0 2
|
eivinwi/EurobotUiO
|
TestClient/lift_up.sh
|
Shell
|
gpl-3.0
| 278 |
#!/usr/bin/ksh
##############################################################################
###
### MLC Gold Image Provisioning
### Created by Cliff Ching
### Date Created: 10 March 2015
###
### Updated by Cliff Ching
### Date Modified: 10 March 2015
### Description: To automate the MLC gold image provisioning, which takes a lot of time
##############################################################################
### Ensure this script and the required files are present in the same present/current working directory
### Files required:
### mlc_main_reference.tar.gz
### latest customerrights.xml which contains the license
### startstopscript which contains below
### mlc_start.sh <----Stop start script for MLC
### mlc_stop.sh <----Stop start script for MLC
### mlc_start_proxy.sh <----Stop start script for MLC
### Folders needed from Dimensions:
### eod
### MCM
### REACHP3
### FOCUS
### mxres/common
### mxres/mxmlc
### Variables declaration ###
### General Info
WKDIRECTORY=`pwd`
IMAGENAME=mlc_main_reference
ENV_NAME=uat14
## MX App Info
MXFILESERVER_HOST=10.76.64.196
MXFILESERVER_PORT=11450
MXSITE_NAME=site3uat14
MXNET_PORT=11452
MXXMLSERVER_NAME=10.76.64.196
MXXMLSERVER_PORT=11451
MXSERVER_NAME=10.76.64.196
MXSERVER_PORT=11453
### General DB info
DBHOST_NAME=10.76.96.160
DBSERVER_PORT=1581
DBSERVERSERVICE_NAME=murex21
DBDATABASESCHEMA_NAME=MLCMX3UAT14
DBUSER=MLCMX3UAT14
DBPW=001000b1005000c7001600a000c600d0003000770050
### MLC DB Info for step 10
MLCDBHOST_NAME=10.76.96.160
MLCDBSERVER_PORT=1581
MLCDBSERVERSERVICE_NAME=murex21
MLCDBDATABASESCHEMA_NAME=MLCUAT14
MLCDBUSER=MLCUAT14
MLCDBPW=001000b100500046008700d600800087
### MX DB for step 26
MXDBHOST_NAME=10.76.96.160
MXDBSERVER_PORT=1581
MXDBSERVERSERVICE_NAME=murex21
MXDBDATABASESCHEMA_NAME=MUREXUAT14
MXDBUSER=MUREXUAT14
MXDBPW=0010002000410047001600c60087008100660027
### Actuate DB for step 27
ACTDBHOST_NAME=10.76.96.51
ACTDBSERVER_PORT=1582
ACTDBSERVERSERVICE_NAME=murex11
ACTDBDATABASESCHEMA_NAME=ACTUATEUAT14
ACTDBUSER=ACTUATEUAT14
ACTDBPW=00d0004100210046008700d600c7009100610021000000d0
### For prod to uat conversion use
VARDBDATABASESCHEMA_NAME=VAREWRSUAT14
EWRSDBDATABASESCHEMA_NAME=EWRSDMUAT14
### Functions ###
dos_cleanup(){ ### dos to unix conversion
for a in `find . -name "*.sh"`; do dos2unix $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -name "*.sh"`; do dos2unix $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -name "*.mxres"`; do dos2unix $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -name "*.xml"`; do dos2unix $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -name "*.ksh"`; do dos2unix $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -name "*.cfg"`; do dos2unix $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -name "*.sql"`; do dos2unix $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
sleep 2;
}
prod_cleanup(){ ### prod to uat conversion
for a in `find . -type f -exec egrep -il 'apps\/murex\/prod' {} \; -print`; do sed -e "s/apps\/murex\/prod/apps\/murex\/${ENV_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'apps\/mlc\/prod' {} \; -print`; do sed -e "s/apps\/mlc\/prod/apps\/mlc\/${ENV_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'oracle\/home\/dba\/scripts\/prod' {} \; -print`; do sed -e "s/oracle\/home\/dba\/scripts\/prod/oracle\/home\/dba\/scripts\/${ENV_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'murexprod' {} \; -print`; do sed -e "s/murexprod/${MXDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'murexprod' {} \; -print`; do sed -e "s/MUREXPROD/${MXDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'actuateprod' {} \; -print`; do sed -e "s/actuateprod/${ACTDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'actuateprod' {} \; -print`; do sed -e "s/ACTUATEPROD/${ACTDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'mlcprod' {} \; -print`; do sed -e "s/MLCPROD/${MLCDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'mlcprod' {} \; -print`; do sed -e "s/mlcprod/${MLCDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'mlcmx3prod' {} \; -print`; do sed -e "s/MLCMX3PROD/${DBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'mlcmx3prod' {} \; -print`; do sed -e "s/mlcmx3prod/${DBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'varprod' {} \; -print`; do sed -e "s/varprod/${VARDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'varprod' {} \; -print`; do sed -e "s/VARPROD/${VARDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'ewrsdmprod' {} \; -print`; do sed -e "s/ewrsdmprod/${EWRSDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
for a in `find . -type f -exec egrep -il 'ewrsdmprod' {} \; -print`; do sed -e "s/EWRSDMPROD/${EWRSDBDATABASESCHEMA_NAME}/g" $a > $a.tmp && mv $a.tmp $a && echo $a" Fixed"; done
sleep 2;
}
mod_words(){ ### sed for attribute values conversion
sed "s/$1/$2/g" $3 > $3.tmp && mv $3.tmp $3
sleep 2;
}
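### Usage example (illustrative values only): swap the site name in a resource file
###   mod_words "<SiteName>site3prod<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/smc/location.mxres"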
### Pre-steps ###
# Checking for required files and folders validity
echo "Checking for the valid files and folders..."
if [[ -f ${IMAGENAME}.tar.gz && -f customerrights.xml && -d eod && -d MCM && -d REACHP3 && -d FOCUS && -d mxres/common && -d mxres/mxmlc && -d startstopscript ]];
then
echo "All required files and folders located"
else
echo "Required files or folders not present in present working directory, please upload them now!!! The script will now be halted"
exit 1
fi
sleep 1;
# Untar gold image
echo -en "Unzip the gold image..."
gzip -cd ${IMAGENAME}.tar.gz | tar -xf - && echo "done"
if [[ -d ${IMAGENAME} ]];
then
echo "Gold Image mlc_main_reference.tar.gz uncompress done successfullly"
else
echo "mlc_main_reference.tar.gz did not uncompress properly, script will now exit"
exit 1
fi
# Checking for license validity in the current image
xmllint --format ${IMAGENAME}/fs/license/customerrights.xml | egrep "\<ExpiryDate\>|\<MLCExpiryDate\>"
###<ExpiryDate>2014/12/20</ExpiryDate>
###<MLCExpiryDate>2014/12/20</MLCExpiryDate>
cp -p customerrights.xml customerrights.xml.`date +%Y%m%d`
###If expired:
###cp /apps/mlc/uat14/fs/license/customerrights.xml /apps/mlc/uat14_new/fs/license/customerrights.xml
###xmllint --format customerrights.xml | egrep "\<ExpiryDate\>|\<MLCExpiryDate\>"
# Clear all old log files in the gold image folder
echo "Removing all old log files in "$IMAGENAME
rm -f ${IMAGENAME}/*.log && echo "logs removed in "$IMAGENAME
rm -f ${IMAGENAME}/logs/*.log && echo "logs removed in "$IMAGENAME"/logs"
sleep 1;
# Remove any possible windows formatting for the folders from Dimensions
echo "dos to unix conversion for Dimension folders..."
cd eod; dos_cleanup && echo "EOD DONE"
cd ../FOCUS; dos_cleanup && echo "FOCUS DONE"
cd ../MCM; dos_cleanup && echo "MCM DONE"
cd ../REACHP3; dos_cleanup && echo "REACHP3 DONE"
cd ../mxres/common; dos_cleanup && echo "mxres/common DONE"
cd ../../mxres/mxmlc; dos_cleanup && echo "mxres/mxmlc DONE"
cd ../../
sleep 1;
# Perform Production to UAT naming and path conversion for the folders from Dimensions
echo "prod to uat naming and path conversion for Dimension folders..."
cd eod; prod_cleanup && echo "EOD DONE"
cd ../FOCUS; prod_cleanup && echo "FOCUS DONE"
cd ../MCM; prod_cleanup && echo "MCM DONE"
cd ../REACHP3; prod_cleanup && echo "REACHP3 DONE"
cd ../mxres/common; prod_cleanup && echo "mxres/common DONE"
cd ../../mxres/mxmlc; prod_cleanup && echo "mxres/mxmlc DONE"
cd ../../
sleep 1;
# Merge Dimension folder data with Gold Image
echo "Merging all dimension folders with existing image folders..."
cp -r eod/* ${IMAGENAME}/eod/ && echo "EOD DONE"
cp -r FOCUS/* ${IMAGENAME}/FOCUS/ && echo "FOCUS DONE"
cp -r MCM/* ${IMAGENAME}/MCM/ && echo "MCM DONE"
cp -r REACHP3/* ${IMAGENAME}/REACHP3/ && echo "REACHP3 DONE"
cp -r mxres/common/* ${IMAGENAME}/fs/public/mxres/common/ && echo "mxres/common DONE"
cp -r mxres/mxmlc/* ${IMAGENAME}/fs/public/mxres/mxmlc/ && echo "mxres/mxmlc DONE"
# Push in the stop start script if not available
echo "Checking stop start scripts availability in gold image..."
if [[ -f ${IMAGENAME}/mlc_start.sh ]];
then
chmod 755 ${IMAGENAME}/mlc_start.sh && echo "mlc_start.sh DONE"
else
cp startstopscript/mlc_start.sh ${IMAGENAME}/; chmod 755 ${IMAGENAME}/mlc_start.sh && echo "mlc_start.sh DONE"
fi
if [[ -f ${IMAGENAME}/mlc_stop.sh ]];
then
chmod 755 ${IMAGENAME}/mlc_stop.sh && echo "mlc_stop.sh DONE"
else
cp startstopscript/mlc_stop.sh ${IMAGENAME}/; chmod 755 ${IMAGENAME}/mlc_stop.sh && echo "mlc_stop.sh DONE"
fi
if [[ -f ${IMAGENAME}/mlc_start_proxy.sh ]];
then
chmod 755 ${IMAGENAME}/mlc_start_proxy.sh && echo "mlc_start_proxy.sh DONE"
else
cp startstopscript/mlc_start_proxy.sh ${IMAGENAME}/; chmod 755 ${IMAGENAME}/mlc_start_proxy.sh && echo "mlc_start_proxy.sh DONE"
fi
echo "##################"
echo "### Pre-Step Done"
echo "##################"
sleep 2;
### Main Provisioning Steps ###
# 1. Modifying mxg2000_settings.sh
echo "Checking image mxg2000_settings.sh file for modification validity..."
LN=`egrep "^MXJ_FILESERVER_HOST|^MXJ_FILESERVER_PORT|^XML_SERVER_ARGS|^MXJ_SITE_NAME|^LAUNCHER_ARGS|^MUREXNET_PORT|^MUREXNET_ARGS" ${IMAGENAME}/mxg2000_settings.sh | wc -l`
###MXJ_FILESERVER_HOST=10.76.64.196
###MXJ_FILESERVER_PORT=11450
###XML_SERVER_ARGS="-d64 -Xms512M -Xmx2G -Djava.rmi.server.hostname=10.76.64.196"
###MXJ_SITE_NAME=site3uat14
###LAUNCHER_ARGS="-Djava.rmi.server.hostname=10.76.64.196"
###MUREXNET_PORT=11452
###MUREXNET_ARGS="/IPALTADDR:10.76.64.196 /IPLOG"
###MXMLC_JVM_ARGS="-server -d64 -showversion -Xms16G -Xmx16G
if [ $LN -ne 7 ]
then
echo "Script has detected some new changes in the usual mxg2000_settings.sh file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying mxg2000_settings.sh"
cp -p ${IMAGENAME}/mxg2000_settings.sh ${IMAGENAME}/mxg2000_settings.sh.`date +%Y%m%d`
mod_words "^MXJ_FILESERVER_HOST=.*$" "MXJ_FILESERVER_HOST=${MXFILESERVER_HOST}" "${IMAGENAME}/mxg2000_settings.sh"
mod_words "^MXJ_FILESERVER_PORT=.*$" "MXJ_FILESERVER_PORT=${MXFILESERVER_PORT}" "${IMAGENAME}/mxg2000_settings.sh"
mod_words "^XML_SERVER_ARGS=.*$" "XML_SERVER_ARGS=\"-d64 -Xms512M -Xmx2G -Djava.rmi.server.hostname=${MXFILESERVER_HOST}\"" "${IMAGENAME}/mxg2000_settings.sh"
mod_words "^MXJ_SITE_NAME=.*$" "MXJ_SITE_NAME=$MXSITE_NAME" "${IMAGENAME}/mxg2000_settings.sh"
mod_words "^LAUNCHER_ARGS=.*$" "LAUNCHER_ARGS=\"-Djava.rmi.server.hostname=${MXFILESERVER_HOST}\"" "${IMAGENAME}/mxg2000_settings.sh"
mod_words "^MUREXNET_PORT=.*$" "MUREXNET_PORT=${MXNET_PORT}" "${IMAGENAME}/mxg2000_settings.sh"
mod_words "^MUREXNET_ARGS=.*$" "MUREXNET_ARGS=\"\/IPALTADDR:${MXFILESERVER_HOST} \/IPLOG\"" "${IMAGENAME}/mxg2000_settings.sh"
fi
echo "Modification for mxg2000_settings.sh done"
sleep 1;
# 2. Modifying mxdoc_fs/mxg2000_settings.sh
echo "Checking image mxdoc_fs/mxg2000_settings.sh file for modification validity..."
LN=`egrep "^MXJ_FILESERVER_HOST|^MXJ_FILESERVER_PORT" ${IMAGENAME}/mxdoc_fs/mxg2000_settings.sh | wc -l`
###MXJ_FILESERVER_HOST=10.76.64.196
###MXJ_FILESERVER_PORT=11450
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual mxdoc_fs/mxg2000_settings.sh file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying mxdoc_fs/mxg2000_settings.sh"
cp -p ${IMAGENAME}/mxdoc_fs/mxg2000_settings.sh ${IMAGENAME}/mxdoc_fs/mxg2000_settings.sh.`date +%Y%m%d`
mod_words "^MXJ_FILESERVER_HOST=.*$" "MXJ_FILESERVER_HOST=${MXFILESERVER_HOST}" "${IMAGENAME}/mxdoc_fs/mxg2000_settings.sh"
mod_words "^MXJ_FILESERVER_PORT=.*$" "MXJ_FILESERVER_PORT=${MXFILESERVER_PORT}" "${IMAGENAME}/mxdoc_fs/mxg2000_settings.sh"
fi
echo "Modification for mxdoc_fs/mxg2000_settings.sh done"
sleep 1;
#======================INCOMPLETE SCRIPT================
#GOLDEN Image is using site3uat66
# cat fs/public/mxres/sites/sites.mxres | egrep "site|Host|Port|Destination" | grep -v Description
#3. fs/public/mxres/sites/sites.mxres
###<Name>site3uat14</Name>
###<Host>10.76.64.196</Host>
###<Port>11451</Port>
###<Name>hub1</Name>
###<Host>10.76.64.196</Host>
###<Port>11451</Port>
###<Name>hub2</Name>
###<Name>default</Name>
###<Name>default_http</Name>
###<Destination>site3uat14</Destination>
###<HubDestination>hub1.site3uat14</HubDestination>
###<Destination>site3uat14</Destination>
###<HubDestination>hub2.site3uat14</HubDestination>
###cp -p fs/public/mxres/sites/sites.mxres fs/public/mxres/sites/sites.mxres.`date +%Y%m%d`
# 4. Modifying fs/public/mxres/common/rights.mxres
echo "Checking image fs/public/mxres/common/rights.mxres file for modification validity..."
LN=`grep AuthSite ${IMAGENAME}/fs/public/mxres/common/rights.mxres | wc -l`
###<AuthSite>site3uat14</AuthSite>
if [ $LN -ne 1 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/common/rights.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/common/rights.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/common/rights.mxres ${IMAGENAME}/fs/public/mxres/common/rights.mxres.`date +%Y%m%d`
mod_words "<AuthSite>\([^<][^<]*\)<\/AuthSite>" "<AuthSite>${MXSITE_NAME}<\/AuthSite>" "${IMAGENAME}/fs/public/mxres/common/rights.mxres"
fi
echo "Modification for fs/public/mxres/common/rights.mxres done"
sleep 1;
# 5. Modifying fs/public/mxres/common/dbconfig/dbsource.mxres
echo "Checking image fs/public/mxres/common/dbconfig/dbsource.mxres file for modification validity..."
LN=`egrep "DbHostName|DbServerPortNumber|DbServerOrServiceName|DbDatabaseOrSchemaName|DbUser|DbPassword" ${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres | wc -l`
###<DbHostName>10.76.96.160</DbHostName>
###<DbServerPortNumber>1581</DbServerPortNumber>
###<DbServerOrServiceName>murex21</DbServerOrServiceName>
###<DbDatabaseOrSchemaName>MLCMX3UAT14</DbDatabaseOrSchemaName>
###<DbUser>MLCMX3UAT14</DbUser>
###<DbPassword>001000b1005000c7001600a000c600d0003000770050</DbPassword>
if [ $LN -ne 6 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/common/dbconfig/dbsource.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/common/dbconfig/dbsource.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres ${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres.`date +%Y%m%d`
mod_words "<DbHostName>\([^<][^<]*\)<\/DbHostName>" "<DbHostName>${DBHOST_NAME}<\/DbHostName>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres"
mod_words "<DbServerPortNumber>\([^<][^<]*\)<\/DbServerPortNumber>" "<DbServerPortNumber>${DBSERVER_PORT}<\/DbServerPortNumber>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres"
mod_words "<DbServerOrServiceName>\([^<][^<]*\)<\/DbServerOrServiceName>" "<DbServerOrServiceName>${DBSERVERSERVICE_NAME}<\/DbServerOrServiceName>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres"
mod_words "<DbDatabaseOrSchemaName>\([^<][^<]*\)<\/DbDatabaseOrSchemaName>" "<DbDatabaseOrSchemaName>${DBDATABASESCHEMA_NAME}<\/DbDatabaseOrSchemaName>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres"
mod_words "<DbUser>\([^<][^<]*\)<\/DbUser>" "<DbUser>${DBUSER}<\/DbUser>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres"
mod_words "<DbPassword>\([^<][^<]*\)<\/DbPassword>" "<DbPassword>${DBPW}<\/DbPassword>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsource.mxres"
fi
echo "Modification for fs/public/mxres/common/dbconfig/dbsource.mxres done"
sleep 1;
# 6. Modifying fs/public/mxres/common/dbconfig/dbsourcerep.mxres
echo "Checking image ffs/public/mxres/common/dbconfig/dbsourcerep.mxres file for modification validity..."
LN=`egrep "DbHostName|DbServerPortNumber|DbServerOrServiceName|DbDatabaseOrSchemaName|DbUser|DbPassword" ${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres | wc -l`
###<DbHostName>10.76.96.160</DbHostName>
###<DbServerPortNumber>1581</DbServerPortNumber>
###<DbServerOrServiceName>murex21</DbServerOrServiceName>
###<DbDatabaseOrSchemaName>MLCMX3UAT14</DbDatabaseOrSchemaName>
###<DbUser>MLCMX3UAT14</DbUser>
###<DbPassword>001000b1005000c7001600a000c600d0003000770050</DbPassword>
if [ $LN -ne 6 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/common/dbconfig/dbsourcerep.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/common/dbconfig/dbsourcerep.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres ${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres.`date +%Y%m%d`
mod_words "<DbHostName>\([^<][^<]*\)<\/DbHostName>" "<DbHostName>${DBHOST_NAME}<\/DbHostName>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres"
mod_words "<DbServerPortNumber>\([^<][^<]*\)<\/DbServerPortNumber>" "<DbServerPortNumber>${DBSERVER_PORT}<\/DbServerPortNumber>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres"
mod_words "<DbServerOrServiceName>\([^<][^<]*\)<\/DbServerOrServiceName>" "<DbServerOrServiceName>${DBSERVERSERVICE_NAME}<\/DbServerOrServiceName>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres"
mod_words "<DbDatabaseOrSchemaName>\([^<][^<]*\)<\/DbDatabaseOrSchemaName>" "<DbDatabaseOrSchemaName>${DBDATABASESCHEMA_NAME}<\/DbDatabaseOrSchemaName>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres"
mod_words "<DbUser>\([^<][^<]*\)<\/DbUser>" "<DbUser>${DBUSER}<\/DbUser>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres"
mod_words "<DbPassword>\([^<][^<]*\)<\/DbPassword>" "<DbPassword>${DBPW}<\/DbPassword>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/dbsourcerep.mxres"
fi
echo "Modification for fs/public/mxres/common/dbconfig/dbsourcerep.mxres"
sleep 1;
# 7. Verifying fs/public/mxres/common/dbconfig/mxservercredential.mxres
echo "Verifying image fs/public/mxres/common/dbconfig/mxservercredential.mxres file for information..."
LN=`egrep "<MxAnchor Code=\"CTLIBDefaultCredential\">|<MxAnchor Code=\"MXMLCredential\">" ${IMAGENAME}/fs/public/mxres/common/dbconfig/mxservercredential.mxres | wc -l`
###<MxAnchor Code="CTLIBDefaultCredential">
###<MxAnchor Code="MXMLCredential">
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/common/dbconfig/mxservercredential.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Verified for fs/public/mxres/common/dbconfig/mxservercredential.mxres"
fi
sleep 1;
# 8. Modifying fs/public/mxres/common/dbconfig/murexnet.mxres
echo "Checking image fs/public/mxres/common/dbconfig/murexnet.mxres file for modification validity..."
LN=`grep DefaultCommand ${IMAGENAME}/fs/public/mxres/common/dbconfig/murexnet.mxres | wc -l`
###<DefaultCommand>/IPHOST:10.76.64.196:11452</DefaultCommand><!--MANDATORY: MUREXNET_HOST and MUREXNET_PORT should be modified-->
if [ $LN -ne 1 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/common/dbconfig/murexnet.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/common/dbconfig/murexnet.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/common/dbconfig/murexnet.mxres ${IMAGENAME}/fs/public/mxres/common/dbconfig/murexnet.mxres.`date +%Y%m%d`
mod_words "<DefaultCommand>\([^<][^<]*\)<\/DefaultCommand>" "<DefaultCommand>\/IPHOST\:${MXSERVER_NAME}\:${MXNET_PORT}<\/DefaultCommand>" "${IMAGENAME}/fs/public/mxres/common/dbconfig/murexnet.mxres"
fi
echo "Modification for fs/public/mxres/common/dbconfig/murexnet.mxres done"
sleep 1;
# 9. Verifying fs/public/mxres/common/dbconfig/dbsourcemapping.mxres
# copy the whole file
###9. fs/public/mxres/common/dbconfig/dbsourcemapping.mxres -> no need to do anything for this step
### a. Update the DBSource File Mapping with the physical file name and location
# 10. Modifying fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres
echo "Checking image fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres file for modification validity..."
LN=`egrep "DbHostName|DbServerPortNumber|DbServerOrServiceName|DbDatabaseOrSchemaName|DbUser|DbPassword" ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres | wc -l`
###<DbHostName>10.76.96.160</DbHostName>
###<DbServerPortNumber>1581</DbServerPortNumber>
###<DbServerOrServiceName>murex21</DbServerOrServiceName>
###<DbDatabaseOrSchemaName>MLCUAT14</DbDatabaseOrSchemaName>
###<DbUser>MLCUAT14</DbUser>
###<DbPassword>001000b100500046008700d600800087</DbPassword>
if [ $LN -ne 6 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres.`date +%Y%m%d`
mod_words "<DbHostName>\([^<][^<]*\)<\/DbHostName>" "<DbHostName>${MLCDBHOST_NAME}<\/DbHostName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres"
mod_words "<DbServerPortNumber>\([^<][^<]*\)<\/DbServerPortNumber>" "<DbServerPortNumber>${MLCDBSERVER_PORT}<\/DbServerPortNumber>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres"
mod_words "<DbServerOrServiceName>\([^<][^<]*\)<\/DbServerOrServiceName>" "<DbServerOrServiceName>${MLCDBSERVERSERVICE_NAME}<\/DbServerOrServiceName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres"
mod_words "<DbDatabaseOrSchemaName>\([^<][^<]*\)<\/DbDatabaseOrSchemaName>" "<DbDatabaseOrSchemaName>${MLCDBDATABASESCHEMA_NAME}<\/DbDatabaseOrSchemaName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres"
mod_words "<DbUser>\([^<][^<]*\)<\/DbUser>" "<DbUser>${MLCDBUSER}<\/DbUser>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres"
mod_words "<DbPassword>\([^<][^<]*\)<\/DbPassword>" "<DbPassword>${MLCDBPW}<\/DbPassword>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres"
fi
echo "Modification for fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres done"
sleep 1;
# 11. Modifying fs/public/mxres/mxmlc/mlc_properties.mxres
echo "Checking image fs/public/mxres/mxmlc/mlc_properties.mxres file for modification validity..."
LN=`egrep "fsHost|fsPort|mxSiteName|xmlsHost|xmlsPort|mxDestinationSiteName|serverHost|serverPort" ${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres | wc -l`
###<Property><Code>murex.limits.progs.client.xmls.fsHost</Code><Value>10.76.64.196</Value></Property>
###<Property><Code>murex.limits.progs.client.xmls.fsPort</Code><Value>11450</Value></Property>
###<Property><Code>murex.limits.progs.client.xmls.mxSiteName</Code><Value>site3uat14</Value></Property>
###<Property><Code>murex.limits.progs.client.xmls.xmlsHost</Code><Value>10.76.64.196</Value></Property>
###<Property><Code>murex.limits.progs.client.xmls.xmlsPort</Code><Value>11451</Value></Property>
###<Property><Code>murex.limits.progs.client.xmls.mxDestinationSiteName</Code><Value>site3uat14</Value></Property>
###<Property><Code>murex.limits.progs.server.serverHost</Code><Value>10.76.64.196</Value></Property>
###<Property><Code>murex.limits.progs.server.serverPort</Code><Value>11453</Value></Property>
if [ $LN -ne 8 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmlc/mlc_properties.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmlc/mlc_properties.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres ${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres.`date +%Y%m%d`
mod_words "fsHost<\/Code><Value>\([^<][^<]*\)<\/Value>" "fsHost<\/Code><Value>${MXFILESERVER_HOST}<\/Value>" "${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres"
mod_words "fsPort<\/Code><Value>\([^<][^<]*\)<\/Value>" "fsPort<\/Code><Value>${MXFILESERVER_PORT}<\/Value>" "${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres"
mod_words "mxSiteName<\/Code><Value>\([^<][^<]*\)<\/Value>" "mxSiteName<\/Code><Value>${MXSITE_NAME}<\/Value>" "${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres"
mod_words "xmlsHost<\/Code><Value>\([^<][^<]*\)<\/Value>" "xmlsHost<\/Code><Value>${MXXMLSERVER_NAME}<\/Value>" "${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres"
mod_words "xmlsPort<\/Code><Value>\([^<][^<]*\)<\/Value>" "xmlsPort<\/Code><Value>${MXXMLSERVER_PORT}<\/Value>" "${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres"
mod_words "mxDestinationSiteName<\/Code><Value>\([^<][^<]*\)<\/Value>" "mxDestinationSiteName<\/Code><Value>${MXSITE_NAME}<\/Value>" "${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres"
mod_words "serverHost<\/Code><Value>\([^<][^<]*\)<\/Value>" "serverHost<\/Code><Value>${MXSERVER_NAME}<\/Value>" "${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres"
mod_words "serverPort<\/Code><Value>\([^<][^<]*\)<\/Value>" "serverPort<\/Code><Value>${MXSERVER_PORT}<\/Value>" "${IMAGENAME}/fs/public/mxres/mxmlc/mlc_properties.mxres"
fi
echo "Modification for fs/public/mxres/mxmlc/mlc_properties.mxres done"
sleep 1;
# 12. Modifying fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres
echo "Checking image fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres file for modification validity..."
LN=`egrep "port|MXJ_SITE_NAME" ${IMAGENAME}/fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres | wc -l`
###<port>11453</port>
###<parameter name="/MXJ_SITE_NAME:" >site3uat14</parameter>
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres ${IMAGENAME}/fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres.`date +%Y%m%d`
mod_words "<port>\([^<][^<]*\)<\/port>" "<port>${MXSERVER_PORT}<\/port>" "${IMAGENAME}/fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres"
mod_words "<parameter name=\"\/MXJ_SITE_NAME:\" >\([^<][^<]*\)<\/parameter>" "<parameter name=\"\/MXJ_SITE_NAME:\" >${MXSITE_NAME}<\/parameter>" "${IMAGENAME}/fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres"
fi
echo "Modification for fs/public/mxres/mxmlc/mxg2000_proxy/configuration.mxres done"
sleep 1;
# 13. Modifying fs/public/mxres/mxmlc/migration/migration.props
echo "Checking image fs/public/mxres/mxmlc/migration/migration.props file for modification validity..."
LN=`grep mxSiteName ${IMAGENAME}/fs/public/mxres/mxmlc/migration/migration.props | wc -l`
###murex.limits.progs.client.xmls.mxSiteName=site3uat14
if [ $LN -ne 1 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmlc/migration/migration.props file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmlc/migration/migration.props"
cp -p ${IMAGENAME}/fs/public/mxres/mxmlc/migration/migration.props ${IMAGENAME}/fs/public/mxres/mxmlc/migration/migration.props.`date +%Y%m%d`
mod_words "mxSiteName=.*$" "mxSiteName=${MXSITE_NAME}" "${IMAGENAME}/fs/public/mxres/mxmlc/migration/migration.props"
fi
echo "Modification for fs/public/mxres/mxmlc/migration/migration.props done"
sleep 1;
# 14. Modifying fs/public/mxres/loginimpl/location.mxres
echo "Checking image fs/public/mxres/loginimpl/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/loginimpl/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 1 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/loginimpl/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/loginimpl/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/loginimpl/location.mxres ${IMAGENAME}/fs/public/mxres/loginimpl/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/loginimpl/location.mxres"
fi
echo "Modification for fs/public/mxres/loginimpl/location.mxres done"
sleep 1;
# 15. Modifying fs/public/mxres/odr/assembly/location.mxres
echo "Checking image fs/public/mxres/odr/assembly/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/odr/assembly/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 1 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/odr/assembly/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/odr/assembly/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/odr/assembly/location.mxres ${IMAGENAME}/fs/public/mxres/odr/assembly/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/odr/assembly/location.mxres"
fi
echo "Modification for fs/public/mxres/odr/assembly/location.mxres done"
sleep 1;
# 16. Modifying fs/public/mxres/mxdispatcher/location.mxres
echo "Checking image fs/public/mxres/mxdispatcher/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/mxdispatcher/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxdispatcher/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxdispatcher/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxdispatcher/location.mxres ${IMAGENAME}/fs/public/mxres/mxdispatcher/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/mxdispatcher/location.mxres"
fi
echo "Modification for fs/public/mxres/mxdispatcher/location.mxres done"
sleep 1;
# 17. Modifying fs/public/mxres/mxcache/location.mxres
echo "Checking image fs/public/mxres/mxcache/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/mxcache/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxcache/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxcache/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxcache/location.mxres ${IMAGENAME}/fs/public/mxres/mxcache/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/mxcache/location.mxres"
fi
echo "Modification for fs/public/mxres/mxcache/location.mxres done"
sleep 1;
# 18. Modifying fs/public/mxres/mxhibernate/location.mxres
echo "Checking image fs/public/mxres/mxhibernate/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/mxhibernate/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 1 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxhibernate/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxhibernate/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxhibernate/location.mxres ${IMAGENAME}/fs/public/mxres/mxhibernate/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/mxhibernate/location.mxres"
fi
echo "Modification for fs/public/mxres/mxhibernate/location.mxres done"
sleep 1;
# 19. Modifying fs/public/mxres/mxlock/location.mxres
echo "Checking image fs/public/mxres/mxlock/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/mxlock/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxlock/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxlock/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxlock/location.mxres ${IMAGENAME}/fs/public/mxres/mxlock/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/mxlock/location.mxres"
fi
echo "Modification for fs/public/mxres/mxlock/location.mxres done"
sleep 1;
# 20. Modifying fs/public/mxres/mxmaps/location.mxres
echo "Checking image fs/public/mxres/mxmaps/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/mxmaps/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmaps/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmaps/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxmaps/location.mxres ${IMAGENAME}/fs/public/mxres/mxmaps/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/mxmaps/location.mxres"
fi
echo "Modification for fs/public/mxres/mxmaps/location.mxres done"
sleep 1;
# 21. Modifying fs/public/mxres/mxmlexchange/amendmentagent/location.mxres
echo "Checking image fs/public/mxres/mxmlexchange/amendmentagent/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/mxmlexchange/amendmentagent/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmlexchange/amendmentagent/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmlexchange/amendmentagent/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxmlexchange/amendmentagent/location.mxres ${IMAGENAME}/fs/public/mxres/mxmlexchange/amendmentagent/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/mxmlexchange/amendmentagent/location.mxres"
fi
echo "Modification for fs/public/mxres/mxmlexchange/amendmentagent/location.mxres done"
sleep 1;
# 22. Modifying fs/public/mxres/mxmlexchange/location.mxres
echo "Checking image fs/public/mxres/mxmlexchange/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/mxmlexchange/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmlexchange/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmlexchange/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxmlexchange/location.mxres ${IMAGENAME}/fs/public/mxres/mxmlexchange/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/mxmlexchange/location.mxres"
fi
echo "Modification for fs/public/mxres/mxmlexchange/location.mxres done"
sleep 1;
# 23. Modifying fs/public/mxres/mxwarehouse/location.mxres
echo "Checking image fs/public/mxres/mxwarehouse/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/mxwarehouse/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 2 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxwarehouse/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxwarehouse/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxwarehouse/location.mxres ${IMAGENAME}/fs/public/mxres/mxwarehouse/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/mxwarehouse/location.mxres"
fi
echo "Modification for fs/public/mxres/mxwarehouse/location.mxres done"
sleep 1;
# 24. Modifying fs/public/mxres/odr/location.mxres
echo "Checking image fs/public/mxres/odr/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/odr/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 1 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/odr/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/odr/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/odr/location.mxres ${IMAGENAME}/fs/public/mxres/odr/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/odr/location.mxres"
fi
echo "Modification for fs/public/mxres/odr/location.mxres done"
sleep 1;
# 25. Modifying fs/public/mxres/smc/location.mxres
echo "Checking image fs/public/mxres/smc/location.mxres file for modification validity..."
LN=`grep SiteName ${IMAGENAME}/fs/public/mxres/smc/location.mxres | wc -l`
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
###<SiteName>site3uat14</SiteName>
if [ $LN -ne 4 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/smc/location.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/smc/location.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/smc/location.mxres ${IMAGENAME}/fs/public/mxres/smc/location.mxres.`date +%Y%m%d`
mod_words "<SiteName>\([^<][^<]*\)<\/SiteName>" "<SiteName>${MXSITE_NAME}<\/SiteName>" "${IMAGENAME}/fs/public/mxres/smc/location.mxres"
fi
echo "Modification for fs/public/mxres/smc/location.mxres done"
sleep 1;
# 26. Modifying fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres
echo "Checking image fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres file for modification validity..."
LN=`egrep "DbHostName|DbServerPortNumber|DbServerOrServiceName|DbDatabaseOrSchemaName|DbUser|DbPassword" ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres | wc -l`
###<DbHostName>10.76.96.160</DbHostName>
###<DbServerPortNumber>1581</DbServerPortNumber>
###<DbServerOrServiceName>murex21</DbServerOrServiceName>
###<DbDatabaseOrSchemaName>MUREXUAT14</DbDatabaseOrSchemaName>
###<DbUser>MUREXUAT14</DbUser>
###<DbPassword>0010002000410047001600c60087008100660027</DbPassword>
if [ $LN -ne 6 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres.`date +%Y%m%d`
mod_words "<DbHostName>\([^<][^<]*\)<\/DbHostName>" "<DbHostName>${MXDBHOST_NAME}<\/DbHostName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres"
mod_words "<DbServerPortNumber>\([^<][^<]*\)<\/DbServerPortNumber>" "<DbServerPortNumber>${MXDBSERVER_PORT}<\/DbServerPortNumber>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres"
mod_words "<DbServerOrServiceName>\([^<][^<]*\)<\/DbServerOrServiceName>" "<DbServerOrServiceName>${MXDBSERVERSERVICE_NAME}<\/DbServerOrServiceName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres"
mod_words "<DbDatabaseOrSchemaName>\([^<][^<]*\)<\/DbDatabaseOrSchemaName>" "<DbDatabaseOrSchemaName>${MXDBDATABASESCHEMA_NAME}<\/DbDatabaseOrSchemaName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres"
mod_words "<DbUser>\([^<][^<]*\)<\/DbUser>" "<DbUser>${MXDBUSER}<\/DbUser>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres"
mod_words "<DbPassword>\([^<][^<]*\)<\/DbPassword>" "<DbPassword>${MXDBPWs}<\/DbPassword>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres"
fi
echo "Modification for fs/public/mxres/mxmlc/dbconfig/dbsource_mlc.mxres done"
sleep 1;
# 27. Modifying fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres
echo "Checking image fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres file for modification validity..."
LN=`egrep "DbHostName|DbServerPortNumber|DbServerOrServiceName|DbDatabaseOrSchemaName|DbUser|DbPassword" ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_reset.mxres | wc -l`
###<DbHostName>10.76.96.160</DbHostName>
###<DbServerPortNumber>1581</DbServerPortNumber>
###<DbServerOrServiceName>murex21</DbServerOrServiceName>
###<DbDatabaseOrSchemaName>MUREXUAT14</DbDatabaseOrSchemaName>
###<DbUser>MUREXUAT14</DbUser>
###<DbPassword>0010002000410047001600c60087008100660027</DbPassword>
if [ $LN -ne 6 ]
then
echo "Script has detected some new changes in the usual fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres file; Will exit the script now for you to check."
exit 0;
else
echo "Modifying fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres"
cp -p ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres ${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres.`date +%Y%m%d`
mod_words "<DbHostName>\([^<][^<]*\)<\/DbHostName>" "<DbHostName>${ACTDBHOST_NAME}<\/DbHostName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres"
mod_words "<DbServerPortNumber>\([^<][^<]*\)<\/DbServerPortNumber>" "<DbServerPortNumber>${ACTDBSERVER_PORT}<\/DbServerPortNumber>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres"
mod_words "<DbServerOrServiceName>\([^<][^<]*\)<\/DbServerOrServiceName>" "<DbServerOrServiceName>${ACTDBSERVERSERVICE_NAME}<\/DbServerOrServiceName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres"
mod_words "<DbDatabaseOrSchemaName>\([^<][^<]*\)<\/DbDatabaseOrSchemaName>" "<DbDatabaseOrSchemaName>${ACTDBDATABASESCHEMA_NAME}<\/DbDatabaseOrSchemaName>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres"
mod_words "<DbUser>\([^<][^<]*\)<\/DbUser>" "<DbUser>${ACTDBUSER}<\/DbUser>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres"
mod_words "<DbPassword>\([^<][^<]*\)<\/DbPassword>" "<DbPassword>${ACTDBPW}<\/DbPassword>" "${IMAGENAME}/fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres"
fi
echo "Modification for fs/public/mxres/mxmlc/dbconfig/dbsource_mlc_upload.mxres done"
sleep 1;
# Changing folder name
mv ${WKDIRECTORY}/${IMAGENAME} ${WKDIRECTORY}/${ENV_NAME}
echo "#######################"
echo "### MLC Provision Done"
echo "#######################"
sleep 2;
###Need to check $JAVA_HOME and $ORACLE_HOME
echo $JAVA_HOME
echo $ORACLE_HOME
###Memory for MxML server should be set to 6G - 6G
|
cliffusion/script
|
mlc_provision.sh
|
Shell
|
gpl-3.0
| 44,015 |
:
# $APPASERVER_HOME/utility/sed_data_directory.sh
# ----------------------------------------------
# Freely available software: see Appaserver.org
# ----------------------------------------------
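# Usage example (hypothetical filenames): relocate MyISAM data/index files in a dump
#   cat table.sql | sed_data_directory.sh /data2/mysql /index2/mysql > table_relocated.sql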
if [ "$#" -lt 1 ]
then
echo "Usage: $0 data_directory [index_directory]" 1>&2
exit 1
fi
data_directory=$1
if [ "$#" -eq 2 ]
then
index_directory=$2
fi
DD=$data_directory
ID=$index_directory
if [ "$data_directory" = "" -a "$index_directory" = "" ]
then
cat
elif [ "$ID" != "" ]
then
sed "s|ENGINE=MyISAM|data directory='$DD' index directory='$ID' &|"
else
sed "s|ENGINE=MyISAM|data directory='$DD' &|"
fi
exit $?
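# A minimal worked example (hypothetical paths), showing how the sed filter above
# rewrites a MySQL dump piped through this script:
#   echo "CREATE TABLE t (id INT) ENGINE=MyISAM;" | ./sed_data_directory.sh /srv/mysql/data /srv/mysql/index
# would emit:
#   CREATE TABLE t (id INT) data directory='/srv/mysql/data' index directory='/srv/mysql/index' ENGINE=MyISAM;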
| timhriley/appaserver | utility/sed_data_directory.sh | Shell | gpl-3.0 | 622 |
#!/bin/sh
set -e
# Find uncertainty (position x momentum) for wells of varying width
#
# This script is part of the QWWAD software suite. Any use of this code
# or its derivatives in published work must be accompanied by a citation
# of:
# P. Harrison and A. Valavanis, Quantum Wells, Wires and Dots, 4th ed.
# Chichester, U.K.: J. Wiley, 2016, ch.3
#
# (c) Copyright 1996-2016
# Alex Valavanis <[email protected]>
#
# QWWAD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QWWAD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QWWAD. If not, see <http://www.gnu.org/licenses/>.
# Initialise files
outfile=uncertainty-principle-finite-well.dat
rm -f $outfile
# Loop for different well widths
for LW in 20 30 40 50 60 70 80 90 100 120 140 160 180 200; do
# Calculate ground state energy and wave function as a function
# of well width for GaAs
qwwad_ef_square_well --wellwidth $LW
# Extract the uncertainty product from the qwwad_uncertainty output and append it to the results file
data=`qwwad_uncertainty | awk '/Delta_z.Delta_p/{printf("%8.3f\n",$2)}'`
printf "%d\t%s\n" $LW $data >> $outfile
done
cat << EOF
Results have been written to $outfile in the format:
COLUMN 1 - Well width [angstrom]
COLUMN 2 - Uncertainty [x hbar/(2 pi)]
This script is part of the QWWAD software suite.
(c) Copyright 1996-2016
Alex Valavanis <[email protected]>
Paul Harrison <[email protected]>
Report bugs to https://bugs.launchpad.net/qwwad
EOF
# Clean up workspace
rm -f *.r
| QWWAD/qwwad | examples/numerical-solutions/uncertainty-principle-finite-well.sh | Shell | gpl-3.0 | 1,933 |
if [ ! -d "$1" ]; then
echo "Must pass git directory as the first argument."
exit 2;
fi
cd $1
echo -e "Enter new version (Current `git describe --abbrev=0 --tags`): \c "
while read ver; do
if ([[ ! -z "$ver" ]]) && ([ "$ver" == "`echo $ver | grep "^[0-9]\{1,\}\.[0-9]\{1,\}\.[0-9]\{1,\}$"`" ])
then
break
else
echo " Version entered ($ver) was not formatted properly."
echo -e "Enter new version (Current `git describe --abbrev=0 --tags`): \c "
fi
done
echo -e "Are you sure you want to release version $ver? (y|n): \c "
read confirm
if([ $confirm == "y" ]) then
git checkout -b release-$ver develop
git checkout master
result=`git merge --no-ff release-$ver`
if([ "$result" == "`echo $result | grep "^Already up-to-date\."`" ]) then
git branch -D release-$ver
echo "The branch is already up to date. Aborting version bump.";
exit 3;
else
git tag -a $ver -m "Used bump script."
git branch -D release-$ver
echo ""
echo "If you need to undo this last commit, abort now and run:"
echo " git reset --hard HEAD~1"
echo " git tag -d $ver"
echo ""
echo -e "Push this change to the origin server and merge back into the develop branch? (y|n): \c "
read confirm
if([ $confirm == "y" ]) then
git push origin master
git checkout develop
git merge master
fi
echo "Finished merging."
exit 1;
fi
else
echo "Aborted the version bump."
exit 1;
fi
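# Example invocation (hypothetical repository path):
#   ./bump.sh ~/src/qt4-fsarchiver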
| necrose99/qt4-fsarchiver | bump.sh | Shell | gpl-3.0 | 1,583 |
#!/bin/bash
echo "Note: you need to be in nios2_command_shell to run this script"
sof2flash --input=recon_1.sof --output=recon_1.flash --epcs --programmingmode=as
nios2-elf-objcopy -I srec -O ihex recon_1.flash recon_1.hex
| jefflieu/recon | hw/recon_1/civ10/output_files/convert_sof_to_hex.sh | Shell | gpl-3.0 | 225 |
#!/bin/bash
#version 3
#Copyright 2011 George Anastasiou
#This file is part of XAMPPadmin.
#
# XAMPPadmin is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# XAMPPadmin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XAMPPadmin. If not, see <http://www.gnu.org/licenses/>.
clear
echo "This programm will install Wordpress"
echo "What path would you like to use??(i.e if you type blog the visit path will be http://localhost/blog):"
read path
echo "Installation will start in 5 seconds..."
sleep 5
cd /tmp
echo "Downloading files..."
wget "http://wordpress.org/latest.tar.gz" -O WP.tar.gz
echo "Unpackaging data and copying..."
sudo tar xvfz WP.tar.gz
sudo mv wordpress /opt/lampp/htdocs/$path
echo "done..."
echo "Now enter http://localhost/$path and setup your wordpress"
echo "Exiting..."
sleep 5
| gsiou/XAMPPadmin | wordpress_installer.sh | Shell | gpl-3.0 | 1,281 |
#!/bin/bash
source ~/.bash_profile
# LOAD_USER_FUNCTION
# gnash fails to locate files under some paths. To work around this, create a symbolic link to the file to be opened and pass the link to gnash instead of the original.
temp=/tmp/.swfopen
mkdir -- "$temp"
link=$temp/$RANDOM$RANDOM$RANDOM.swf
ln -s "$1" "$link"
gnash -- "$link"
#gnash-gtk-launcher
exit
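# Example invocation (hypothetical file path):
#   ./swfopen.sh "$HOME/Downloads/animation.swf"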
| Thestar3Preservation/ScriptPack | Bash Shell Script/swfopen.sh | Shell | gpl-3.0 | 415 |
#!/bin/bash
set -ev
SCRIPTDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $SCRIPTDIR/../funcs.sh
VERSION="$(get_version .)"
#Only build if the commit we are building is for the last tag
if [ "$(git rev-list -n 1 $VERSION)" != "$(git rev-parse HEAD)" ]; then
echo "Not uploading package"
exit 0
fi
# Debian package
docker exec mc-deb bash /scripts/build_source.sh $VERSION xenial
docker exec mc-deb bash /scripts/build_source.sh $VERSION bionic
docker exec mc-deb bash /scripts/build_source.sh $VERSION focal
docker exec mc-deb bash /scripts/build_source.sh $VERSION hirsute
docker exec mc-deb bash /scripts/build_source.sh $VERSION jammy
docker exec mc-deb bash /scripts/build_source.sh $VERSION impish
#windows and appimage
docker exec appimgbuilder bash /scripts/package.sh
docker exec winbuilder bash -c "export CODESIGN_WIN_PASS=${CODESIGN_WIN_PASS}; /scripts/package.sh"
#prepare files to upload volume
mkdir -p $HOME/uploads
cp -v \
packages/*.{exe,zip} \
build-appimage/*.AppImage \
$HOME/uploads/
EXE_FILE="$(ls $HOME/uploads/*.exe 2> /dev/null | head -n 1)"
APPIMAGE_FILE=$(find $HOME/uploads -iname '*.AppImage')
#create json (only used by testing builds)
cat > $HOME/uploads/updater.json <<EOF
[{
"tag_name": "$VERSION",
"html_url": "https://mooltipass-tests.com/mc_betas/$VERSION",
"body": "",
"assets": [
{
"name": "$(basename $EXE_FILE)",
"browser_download_url": "https://mooltipass-tests.com/mc_betas/$VERSION/$(basename $EXE_FILE)"
},
{
"name": "$(basename $APPIMAGE_FILE)",
"browser_download_url": "https://mooltipass-tests.com/mc_betas/$VERSION/$(basename $APPIMAGE_FILE)"
}
]
}]
EOF
docker run -t --name mc-upload -d \
-v $HOME/uploads:/uploads \
-e "GITHUB_LOGIN=${GITHUB_LOGIN}" \
-e "GITHUB_TOKEN=${GITHUB_TOKEN}" \
mooltipass/mc-upload
for f in $HOME/uploads/*
do
ff=$(basename $f)
echo uploading $ff
if [ -f $HOME/uploads/$ff ]
then
docker exec mc-upload bash -c "export SFTP_USER=${MC_BETA_UPLOAD_SFTP_USER} SFTP_PASS=${MC_BETA_UPLOAD_SFTP_PASS} ; /scripts/upload.sh $VERSION $ff"
fi
done
# Trigger new build on OBS
# Requires the following list of environment variables set
# - OBS_API, for example "https://api.opensuse.org"
# - OBS_PROJ, for example "home:someuser:someproject"
# - OBS_USER, the user to authenticate as against the OBS_API
# - OBS_PASS, the user's password
# Just to be clear. The revision *must* be a git tag matching the python regex
# v([0-9\.]*)(-testing)?(.*)
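# For example (hypothetical tags): v1.2.3 leaves OBS_PKG as "moolticute", while
# v1.2.3-testing matches the "-testing" suffix below and selects "moolticute-testing".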
REVISION="$VERSION"
OBS_COMMIT_MSG="Update package to revision ${REVISION}"
OBS_PKG="moolticute"
if endsWith -testing "$VERSION"; then
OBS_PKG="moolticute-testing"
fi
# Fetch current _service file to CWD
curl -u $OBS_USER:$OBS_PASS -X GET ${OBS_API}/source/${OBS_PROJ}/${OBS_PKG}/_service -o _service
# Update revision to build
sed -i "s|<param name=\"revision\">\(.*\)</param>|<param name=\"revision\">${REVISION}</param>|" _service
# Push modified _service file back to OBS
curl -u $OBS_USER:$OBS_PASS -X PUT -T _service ${OBS_API}/source/${OBS_PROJ}/${OBS_PKG}/_service
# Commit changes, which will trigger a new build and eventually release
curl -u $OBS_USER:$OBS_PASS -X POST ${OBS_API}/source/${OBS_PROJ}/${OBS_PKG}?cmd=commit --data-urlencode "comment=${OBS_COMMIT_MSG}"
| mooltipass/moolticute | scripts/ci/linux/after_success.sh | Shell | gpl-3.0 | 3,381 |
#!/usr/bin/env bash
##########################################################
#
# Copyright (C) 2015 Eduardo Dimas (https://github.com/eddimas/nagios-plugins)
# Copyright (C) 2009 Mike Adolphs (http://www.matejunkie.com/)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##########################################################
PROGNAME=$(basename -s .sh $0)
TIMESTAMP=$(date +%Y-%m-%d_%H.%M.%S)
VERSION="Version 1.2,"
AUTHOR="2015, Eduardo Dimas (https://github.com/eddimas/nagios-plugins)"
TEMP_FILE="/var/tmp/${PROGNAME}_${TIMESTAMP}.log"; touch ${TEMP_FILE}
COMMAND='cat /proc/stat'
ST_OK=0
ST_WR=1
ST_CR=2
ST_UK=3
interval=5
print_version() {
printf "%s %s\n" "$VERSION" "$AUTHOR"
}
help() {
cat << END
Usage :
$PROGNAME -l [STRING] -H [STRING] -i [VALUE] -w [VALUE] -c [VALUE]
OPTION DESCRIPTION
----------------------------------
-h Help
-l [STRING] Remote user
-H [STRING] Host name
-i [VALUE] Defines the period where the statistic data will be collected.
Default is: 5 samples, one sample by second
-w [VALUE] Sets a warning level for CPU user.
Default is: on
-c [VALUE] Sets a critical level for CPU user.
Default is: on
----------------------------------
Note : [VALUE] must be an integer.
END
}
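# Example invocation (hypothetical host and thresholds):
#   ./check_cpu.sh -l nagios -H db01.example.com -i 5 -w 80 -c 90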
if [ $# -ne 10 ]
then
help;
exit 3;
fi
while getopts "l:H:i:w:c:" OPT
do
case $OPT in
l) USERNAME="$OPTARG" ;;
H) HOSTNAME="$OPTARG" ;;
i) interval="$OPTARG" ;;
w) warn="$OPTARG" ;;
c) crit="$OPTARG" ;;
*) help ;;
esac
done
val_wcdiff() {
if [ ${warn} -gt ${crit} ]
then
wcdiff=1
fi
}
get_cpuvals() {
nTimes=$1
  # initialise the accumulators so the first 'expr' additions below do not fail
  avg_cpu_user=0; avg_cpu_nice=0; avg_cpu_sys=0; avg_cpu_idle=0
  avg_cpu_iowait=0; avg_cpu_irq=0; avg_cpu_softirq=0; avg_cpu_total=0
for ((x=0;x<=${nTimes};x++)); do
ssh -l ${USERNAME} ${HOSTNAME} -C ${COMMAND} > ${TEMP_FILE}
TEMP_VAR=();
for ((y=2;y<=8;y++)); do
TEMP_VAR+=( $(grep -m1 '^cpu' ${TEMP_FILE} |awk -v var="$y" '{print $var}') );
done
cpu_user+=( ${TEMP_VAR[0]} );
cpu_nice+=( ${TEMP_VAR[1]} );
cpu_sys+=( ${TEMP_VAR[2]} );
cpu_idle+=( ${TEMP_VAR[3]} );
cpu_iowait+=( ${TEMP_VAR[4]} );
cpu_irq+=( ${TEMP_VAR[5]} );
cpu_softirq+=( ${TEMP_VAR[6]} );
cpu_total+=( $(expr ${cpu_user[$x]} + ${cpu_nice[$x]} + ${cpu_sys[$x]} + ${cpu_idle[$x]} + ${cpu_iowait[$x]} + ${cpu_irq[$x]} + ${cpu_softirq[$x]}) );
avg_cpu_user=$( expr $avg_cpu_user + ${cpu_user[$x]} ) ;
avg_cpu_nice=$( expr $avg_cpu_nice + ${cpu_nice[$x]} ) ;
avg_cpu_sys=$( expr $avg_cpu_sys + ${cpu_sys[$x]} ) ;
avg_cpu_idle=$( expr $avg_cpu_idle + ${cpu_idle[$x]} ) ;
avg_cpu_iowait=$( expr $avg_cpu_iowait + ${cpu_iowait[$x]} ) ;
avg_cpu_irq=$( expr $avg_cpu_irq + ${cpu_irq[$x]} ) ;
avg_cpu_softirq=$( expr $avg_cpu_softirq + ${cpu_softirq[x]} ) ;
avg_cpu_total=$( expr $avg_cpu_total + ${cpu_total[$x]} ) ;
sleep 1
done;
cpu_user=$(echo "scale=2; (1000*(${avg_cpu_user}/${nTimes})/(${avg_cpu_total}/${nTimes}))/10" | bc -l | sed 's/^\./0./');
cpu_nice=$(echo "scale=2; (1000*(${avg_cpu_nice}/${nTimes})/(${avg_cpu_total}/${nTimes}))/10" | bc -l | sed 's/^\./0./');
cpu_sys=$(echo "scale=2; (1000*(${avg_cpu_sys}/${nTimes})/(${avg_cpu_total}/${nTimes}))/10" | bc -l | sed 's/^\./0./');
cpu_idle=$(echo "scale=2; (1000*(${avg_cpu_idle}/${nTimes})/(${avg_cpu_total}/${nTimes}))/10" | bc -l | sed 's/^\./0./');
cpu_iowait=$(echo "scale=2; (1000*(${avg_cpu_iowait}/${nTimes})/(${avg_cpu_total}/${nTimes}))/10" | bc -l | sed 's/^\./0./');
cpu_irq=$(echo "scale=2; (1000*(${avg_cpu_irq}/${nTimes})/(${avg_cpu_total}/${nTimes}))/10" | bc -l | sed 's/^\./0./');
cpu_softirq=$(echo "scale=2; (1000*(${avg_cpu_softirq}/${nTimes})/(${avg_cpu_total}/${nTimes}))/10" | bc -l | sed 's/^\./0./');
cpu_total=$(echo "scale=2; (1000*(${avg_cpu_total}/${nTimes})/(${avg_cpu_total}/${nTimes}))/10" | bc -l | sed 's/^\./0./');
cpu_usage=$(echo "(${cpu_user}+${cpu_nice}+${cpu_sys}+${cpu_iowait}+${cpu_irq}+${cpu_softirq})/1" | bc);
rm ${TEMP_FILE}
}
#CPU OK : user=0% system=0% iowait=0% idle=99%
#Performance Data: cpu_user=0%;80;90; cpu_sys=0%;70;90; cpu_iowait=0%;40;60; cpu_idle=99%;
do_output() {
output="user:${cpu_user}%, sys:${cpu_sys}%, iowait:${cpu_iowait}%, idle:${cpu_idle}%"
}
do_perfdata() {
perfdata="cpu_user=${cpu_user}%;${warn};${crit}; cpu_sys=${cpu_sys}%;${warn};${crit}; iowait=${cpu_iowait}%;${warn};${crit};"
}
if [ -n "$warn" -a -n "$crit" ]
then
val_wcdiff
if [ "$wcdiff" = 1 ]
then
printf "Please adjust your warning/critical thresholds. The warning must be lower than the critical level!"
exit ${ST_UK}
fi
fi
get_cpuvals ${interval}
do_output; do_perfdata
if [ -n "$warn" -a -n "$crit" ]
then
if [ "$cpu_usage" -ge "$warn" -a "$cpu_usage" -lt "$crit" ]
then
printf "WARNING - %s | %s\n" "${output}" "${perfdata}"
exit ${ST_WR}
elif [ "$cpu_usage" -ge "$crit" ]
then
printf "CRITICAL - %s | %s\n" "${output}" "${perfdata}"
exit ${ST_CR}
else
printf "OK - %s | %s\n" "${output}" "${perfdata}"
exit ${ST_OK}
fi
else
printf "OK - %s | %s\n" "${output}" "${perfdata}"
exit ${ST_OK}
fi
| eddimas/nagios-plugins | check_cpu.sh | Shell | gpl-3.0 | 5,816 |
#!/bin/bash
# cpan must be installed; otherwise download the Date::Business package, cd into its directory, and run the commented commands below.
cpan get Date::Business
cpan make Date::Business
cpan install Date::Business
#perl Makefile.PL
#make
#make install
| ecjbosu/energyScrapes | perlBusinessDayInstall.sh | Shell | gpl-3.0 | 238 |
#!/bin/bash
hadoop dfs -mkdir sim
hadoop dfs -rmr sim/d_lst
hadoop dfs -rmr sim/d_num
hadoop dfs -rmr sim/d_sim
hadoop jar $HADOOP_HOME/contrib/streaming/hadoop-streaming-*.jar -D mapred.reduce.tasks=40\
-D mapred.job.name='sim_d:num'\
-input twitter/triples/* -output sim/d_num\
-mapper 'python d_num_mapper.py' -reducer 'python num_reducer.py'\
-file d_num_mapper.py -file num_reducer.py
hadoop jar $HADOOP_HOME/contrib/streaming/hadoop-streaming-*.jar -D mapred.reduce.tasks=40\
-D mapred.job.name='sim_d:lst'\
-input sim/d_num -output sim/d_lst\
-mapper /bin/cat -reducer 'python list_reducer.py'\
-file list_reducer.py
hadoop jar $HADOOP_HOME/contrib/streaming/hadoop-streaming-*.jar -D mapred.reduce.tasks=40\
-D mapred.job.name='sim_d:sim'\
-input sim/d_lst -output sim/d_sim\
-mapper 'python similarity_mapper.py' -reducer 'python similarity_reducer.py'\
-file similarity_mapper.py -file similarity_reducer.py
| plagree/TOPKS | dbscripts/similarity/hadoop_similarity_d.sh | Shell | gpl-3.0 | 969 |
#!/bin/bash
export LANG="en_US.UTF-8"
export LANGUAGE="en_US:"
xjc -p com.soapboxrace.jaxb.http -npa -no-header \
Main.xsd
cp -r com ../src/main/java
rm -rf com
| SoapboxRaceWorld/soapbox-race-core | xsd-final/xjc.sh | Shell | gpl-3.0 | 165 |
#!/bin/bash
#From http://richelbilderbeek.nl/CppHelloBoostQtCreatorLubuntuToWindows.htm
echo "Cross compiling to Windows"
myfile="i686-pc-mingw32-qmake"
mytarget="CppLinkErrorUndefinedReferenceToWebCore"
myprofile=$mytarget.pro
if [ -e $myfile ]
then
echo "MXE crosscompiler '$myfile' found"
else
echo "MXE crosscompiler '$myfile' not found directly, but perhaps it is in the PATH"
#exit
fi
if [ -e $myprofile ]
then
echo "Qt Creator project '$myprofile' found"
else
echo "Qt Creator project '$myprofile' not found"
exit
fi
echo "1/2: Creating Windows makefile"
$myfile $myprofile
if [ -e Makefile ]
then
echo "Makefile created successfully"
else
echo "FAIL: qmake CppHelloBoostQtCreatorLubuntu.pro"
exit
fi
echo "2/2: making makefile"
make
if [ -e ./release/$mytarget.exe ]
then
echo "SUCCESS"
else
echo "FAIL"
echo "Note: check if mxe can build glibc" #Knowledge
fi
#Cleaning up
rm ui_*.*
rm Makefile
rm Makefile.*
rm -r debug
rm -r release
| richelbilderbeek/CppTests | CppLinkErrorUndefinedReferenceToWebCore/CppLinkErrorUndefinedReferenceToWebCore.sh | Shell | gpl-3.0 | 976 |
#!/bin/bash
# Generate sized icon images from 512x512 masters
for s in 16 24 32 48 64 128 256
do
[ -d ${s}x${s} ] || mkdir ${s}x${s}
done
for n in PyAuth PyAuth-white PyAuth-grey PyAuth-dark
do
for s in 16 24 32 48 64 128 256
do
convert ${n}.png -scale ${s}x${s} ${s}x${s}/${n}.png
done
i=`basename $n`.ico
icotool -c --icon -o ${i} 256x256/${n}.png 128x128/${n}.png 64x64/${n}.png 32x32/${n}.png 24x24/${n}.png 16x16/${n}.png
done
exit 0
| tknarr/PyAuth | pyauth/images/genicons.sh | Shell | gpl-3.0 | 451 |
rm dc.log
rm dc.json
scrapy runspider dc_scrap.py -o dc.json --logfile=dc.log
grep log_count dc.log
| fparrel/wwsupdb | dc_scrap.sh | Shell | gpl-3.0 | 100 |
#!/bin/sh
SERVICE=$1
kill -9 $(pidof $SERVICE)
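# Example invocation (hypothetical service name):
#   ./KillProcess.sh nginx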
| pmanwatkar/puppetModules | actions/files/ProcessHandlingLinux/KillProcess.sh | Shell | gpl-3.0 | 48 |
#!/bin/bash
DIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
RETURN=0
cd $DIR
echo ""
echo "ISORT"
echo "############################"
isort -rc --check --diff karaage
if [ "$?" -ne 0 ]
then
exit 1
fi
echo ""
echo "FLAKE8"
echo "############################"
flake8 karaage
if [ "$?" -ne 0 ]
then
exit 1
fi
conf="karaage.tests.settings"
echo ""
echo "MIGRATIONS - $conf"
echo "############################"
./manage.py makemigrations --settings="$conf" --check --dry-run
if [ "$?" -ne 0 ]
then
exit 1
fi
echo ""
echo "STATIC FILES - $conf"
echo "############################"
rm -rf tmp
./manage.py collectstatic --settings="$conf" -v 2 --noinput
if [ "$?" -ne 0 ]
then
exit 1
fi
echo ""
echo "TESTS - Python 3 - $conf - $tests"
echo "############################"
pytest --cov=karaage "$@"
if [ "$?" -ne 0 ]
then
exit 1
fi
exit "$RETURN"
| brianmay/karaage | run_tests.sh | Shell | gpl-3.0 | 864 |
CONF_FILE=./etc/git-conf-sync.cfg
. $CONF_FILE
(cd $GCS_WORK ; gpg --homedir $GCS_GNUPGHOME --decrypt $GCS_REPO/curiass.gpg | tar xvz)
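# A sketch of the configuration expected in $CONF_FILE (hypothetical values):
#   GCS_WORK=/var/lib/git-conf-sync/work
#   GCS_GNUPGHOME=/var/lib/git-conf-sync/gnupg
#   GCS_REPO=/var/lib/git-conf-sync/repo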
| lizard-era/git-conf-sync | bin/6.decode.sh | Shell | gpl-3.0 | 137 |
#!/bin/sh
tar -xf c-blosc2-2.0.0.beta.5.tar.gz
cd c-blosc2-2.0.0.beta.5
mkdir build
cd build
cmake ..
make -j $NUM_CPU_CORES
echo $? > ~/install-exit-status
cd ~
echo "#!/bin/sh
cd c-blosc2-2.0.0.beta.5/build/bench
./b2bench \$1 noshuffle suite \$NUM_CPU_CORES > \$LOG_FILE
echo \$? > ~/test-exit-status" > blosc
chmod +x blosc
| phoronix-test-suite/phoronix-test-suite | ob-cache/test-profiles/pts/blosc-1.0.0/install.sh | Shell | gpl-3.0 | 331 |
#!/usr/bin/env bash
#
# This script builds the application from source for multiple platforms.
set -e
# Get the parent directory of where this script is.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
# Change into that directory
cd "$DIR"
# Get the git commit
GIT_COMMIT=$(git rev-parse HEAD)
GIT_DIRTY=$(test -n "`git status --porcelain`" && echo "+CHANGES" || true)
# Determine the arch/os combos we're building for
XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
XC_OS=${XC_OS:-linux darwin windows freebsd openbsd}
GOPATH=${GOPATH:-$(go env GOPATH)}
case $(uname) in
CYGWIN*)
GOPATH="$(cygpath $GOPATH)"
;;
esac
# Delete the old dir
echo "==> Removing old directory..."
rm -f bin/*
rm -rf pkg/*
mkdir -p bin/
# If its dev mode, only build for ourself
if [ "${TF_DEV}x" != "x" ]; then
XC_OS=$(go env GOOS)
XC_ARCH=$(go env GOARCH)
fi
# Build!
echo "==> Building..."
gox \
-os="${XC_OS}" \
-os="!freebsd" \
-os="!openbsd" \
-arch="${XC_ARCH}" \
-ldflags "-X github.com/hashicorp/vault/version.GitCommit ${GIT_COMMIT}${GIT_DIRTY}" \
-output "pkg/{{.OS}}_{{.Arch}}/vault" \
.
# Move all the compiled things to the $GOPATH/bin
OLDIFS=$IFS
IFS=: MAIN_GOPATH=($GOPATH)
IFS=$OLDIFS
# Copy our OS/Arch to the bin/ directory
DEV_PLATFORM="./pkg/$(go env GOOS)_$(go env GOARCH)"
for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do
cp ${F} bin/
cp ${F} ${MAIN_GOPATH}/bin/
done
if [ "${TF_DEV}x" = "x" ]; then
# Zip and copy to the dist dir
echo "==> Packaging..."
for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do
OSARCH=$(basename ${PLATFORM})
echo "--> ${OSARCH}"
pushd $PLATFORM >/dev/null 2>&1
zip ../${OSARCH}.zip ./*
popd >/dev/null 2>&1
done
fi
# Done!
echo
echo "==> Results:"
ls -hl bin/
| sepiroth887/vault | scripts/build.sh | Shell | mpl-2.0 | 1,942 |
#!/bin/bash
source utils/robot
function info {
echo "[INFO]" $*
}
function error {
echo "[ERROR]" $*
}
BASEDIR=$(pwd)
failures=0
errors=0
oks=0
function lsTestCases {
for dir in $(lsTestDirectories)
do
# Retains directories containing an executable run.sh file
[ -x $dir/run.sh ] && echo $dir;
done
}
function lsTestDirectories {
find . -maxdepth 1 -regex "\./t[0-9]*" -type d
}
function runTestCase {
local tc=$1
cd $tc
local padding=$(strCpy "-" ${#tc})
info "--- $tc ---------------------------------------------------------------"
ts1=$(date +%s)
ns1=$(date +%N)
PATH=${BASEDIR}/${install_dir}/scripts:${BASEDIR}/utils:$PATH bash run.sh &
testPid=$!
wait $testPid
ec=$?
ts2=$(date +%s)
ns2=$(date +%N)
killChildren $testPid
info "---$padding-----------------------------------------------------------------"
sec=$(echo "scale=3; ($ts2 - $ts1) * 1 + $ns2 / 1000000000 - $ns1 / 1000000000" | bc -l)
info "Took ${sec} sec"
if [ $ec -gt 0 ]; then
let "failures ++"
elif [ $ec -lt 0 ]; then
let "errors ++"
else
let "oks ++"
fi
cd $BASEDIR
}
info "-----------------------------------"
info "Running integration tests"
info
installer=$1
install_dir='target/local'
rm -rf ${install_dir}
mkdir -p ${install_dir}
info "Installing..."
java -jar ${installer} --install ${install_dir} > /dev/null
bash ${install_dir}/setup.sh
info "Starting..."
bash ${install_dir}/start.sh > target/output.log &
PID=$(wait-tcp 2022 5) || exit 2
info "Server started"
info "-----------------------------------"
info "TESTS"
for tc in $(lsTestCases); do
runTestCase $tc
done
mistakes=$(( $failures + $errors ))
total=$(( $oks + $mistakes ))
info "OK: $oks"
info "Failures: $failures"
info "Errors: $errors"
info "Total: $total"
info "-----------------------------------"
info "Killing server"
kill $PID
info "Done"
info "-----------------------------------"
if [ $mistakes -eq 0 ]; then
info "TEST SUCCESSED"
else
error "TEST FAILED"
fi
exit $mistakes
| ptitfred/magrit | robot/run.sh | Shell | agpl-3.0 | 1,991 |
#!/bin/bash
tilepath="0003"
#fastj2cgflib -o $tilepath.cglf -t <( ./verbose_tagset $tilepath ) -f <( find ./data -name $tilepath.fj.gz | head -n10 | xargs zcat )
#fastj2cgflib -V -o $tilepath.cglf -t $tilepath.verbose_tagset -f <( find ./data -name $tilepath.fj.gz | xargs zcat )
fastj2cgflib -V -t $tilepath.verbose_tagset -f <( find ./data -name $tilepath.fj.gz | xargs zcat )
| curoverse/l7g | sandbox/tilelib/cglf_test.sh | Shell | agpl-3.0 | 385 |
VERSION=`cat ../drivnal/__init__.py | grep __version__ | cut -d\' -f2`
gpg --import private_key.asc
mkdir -p /vagrant/build/debian
cd /vagrant/build/debian
wget https://github.com/drivnal/drivnal/archive/$VERSION.tar.gz
tar xfz $VERSION.tar.gz
rm -rf drivnal-$VERSION/debian
tar cfz drivnal_$VERSION.orig.tar.gz drivnal-$VERSION
rm -rf drivnal-$VERSION
tar xfz $VERSION.tar.gz
cd drivnal-$VERSION
debuild -S
sed -i -e 's/0ubuntu1/0ubuntu1~quantal/g' debian/changelog
debuild -S
sed -i -e 's/0ubuntu1~quantal/0ubuntu1~raring/g' debian/changelog
debuild -S
sed -i -e 's/0ubuntu1~raring/0ubuntu1~saucy/g' debian/changelog
debuild -S
cd ..
echo -e '\n\nRUN COMMANDS BELOW TO UPLOAD:'
echo 'sudo dput ppa:drivnal/ppa/ubuntu/precise ../build/debian/drivnal_'$VERSION'-0ubuntu1_source.changes'
echo 'sudo dput ppa:drivnal/ppa/ubuntu/quantal ../build/debian/drivnal_'$VERSION'-0ubuntu1~quantal_source.changes'
echo 'sudo dput ppa:drivnal/ppa/ubuntu/raring ../build/debian/drivnal_'$VERSION'-0ubuntu1~raring_source.changes'
echo 'sudo dput ppa:drivnal/ppa/ubuntu/saucy ../build/debian/drivnal_'$VERSION'-0ubuntu1~saucy_source.changes'
| drivnal/drivnal | tools/vagrant_build_ubuntu.sh | Shell | agpl-3.0 | 1,131 |
#!/bin/sh
cd electron
npm start
| johansten/stargazer | run.sh | Shell | agpl-3.0 | 32 |
#!/bin/bash
set -e
set +h
function postinstall()
{
echo ca-directory=/etc/ssl/certs >> /etc/wgetrc
}
preinstall()
{
echo "#"
}
$1
| FluidIdeas/parsers | blfs-resources/extrascripts/wget-appconfig.sh | Shell | lgpl-2.1 | 137 |
#!/bin/bash
PROG="${GRINS_TEST_DIR}/generic_solution_regression"
INPUT="${GRINS_TEST_INPUT_DIR}/reacting_low_mach_antioch_cea_constant_regression.in"
DATA="${GRINS_TEST_DATA_DIR}/reacting_low_mach_antioch_cea_constant_regression.xdr"
# A MOAB preconditioner
PETSC_OPTIONS="-pc_type asm -pc_asm_overlap 10 -sub_pc_type lu -sub_pc_factor_shift_type nonzero"
if [ $GRINS_ANTIOCH_ENABLED == 1 ]; then
${LIBMESH_RUN:-} $PROG input=$INPUT soln-data=$DATA vars='u v T p w_N2 w_N' norms='L2 H1' tol='1.5e-8' $PETSC_OPTIONS
else
exit 77;
fi
| nicholasmalaya/grins | test/regression/reacting_low_mach_antioch_cea_constant.sh | Shell | lgpl-2.1 | 542 |
#!/bin/bash
# rename_column.sh username password host port dbname tblname oldcolumn newcolumn
exec 1>/dev/null
exec 2>/dev/null
. error_codes
definition=`echo -e \`echo "SHOW CREATE TABLE $5.$6" | mysql -h $3 -P $4 -u $1 --password=$2\` | grep $7 | head -n 1 | awk -F\\\` '{ print $3 }' | sed s/,//`
if [ $? -ne 0 ] ; then
exit $ERR_SQL_SHOW
fi
# the index might not exist and therefore the next command might fail
echo -e "DROP INDEX $7_index ON $5.$6" | mysql -h $3 -P $4 -u $1 --password=$2
echo -e "ALTER TABLE $5.$6 CHANGE $7 $8 $definition;\n" | mysql -h $3 -P $4 -u $1 --password=$2
if [ $? -ne 0 ] ; then
exit $ERR_SQL_ALTER
fi
echo -e "CREATE INDEX $8_index ON $5.$6 ($8(128));\n" | mysql -h $3 -P $4 -u $1 --password=$2
if [ $? -ne 0 ] ; then
exit $ERR_SQL_CREATE_INDEX
fi
| pubmed2ensembl/MartScript | scripts/rename_column.sh | Shell | lgpl-2.1 | 797 |
JAVA_OPTS="-Djava.awt.headless=true -Xms512m -Xmx4096m -XX:+CMSClassUnloadingEnabled -XX:+CMSPermGenSweepingEnabled -XX:MaxPermSize=512m -XX:+UseConcMarkSweepGC"
| cytomine/Cytomine-IMS | scripts/docker/ims/setenv.sh | Shell | lgpl-2.1 | 162 |
function _send.this () {
DIST_DIR="/Users/ademir/dist"
DIST_SRV=basic.ami
ARCHIVE_NAME=$1
FROM_DIR=$2
export COPY_EXTENDED_ATTRIBUTES_DISABLE=true
export COPYFILE_DISABLE=true
#COPYFILE_DISABLE=true
mkdir -p "$DIST_DIR"
tar -c --exclude-from=$SETUP_ROOT_PATH/.tarignore -vzf "$DIST_DIR"/$ARCHIVE_NAME.tar.gz -C $FROM_DIR .
scp $DIST_DIR/$ARCHIVE_NAME.tar.gz $DIST_SRV.mapa.io:/mnt/ebs/data/gis/upload/
}
function data.upload () {
SERVICE_DIR="/Users/ademir/code/mapa/service"
SRC_DATA_DIR="$SERVICE_DIR/maps/mapserver/data"
SRC_IMPORT_DIR="$SERVICE_DIR/maps/import"
SRC_FONT_DIR="$SERVICE_DIR/maps/mapserver/fonts"
DELAY_DEPLOY=true
_send.this sql.import $SRC_IMPORT_DIR/sql
if [ $DELAY_DEPLOY != true ]; then
_send.this raster.data $SRC_DATA_DIR
_send.this fonts $SRC_FONT_DIR/
#_send.this shapefiles $SRC_IMPORT_DIR/shp/
fi
}
function config.data.dir () {
mkdir -p /mnt/ebs/data/gis/upload
chown ademir.ademir /mnt/ebs/data/gis/upload
}
function explode.upload () {
DELAY_INSTALL=true
# tar -xf archive.tar -C /target/directory
rm -rf /mnt/ebs/data/gis/import/sql
mkdir -p /mnt/ebs/data/gis/import/sql
tar xzvf /mnt/ebs/data/gis/upload/sql.import.tar.gz -C /mnt/ebs/data/gis/import/sql
if [ $DELAY_INSTALL != true ]; then
rm -rf /mnt/ebs/data/gis/mapserver
mkdir -p /mnt/ebs/data/gis/mapserver/{raster,fonts}
tar xzvf /mnt/ebs/data/gis/upload/raster.data.tar.gz -C /mnt/ebs/data/gis/mapserver/raster
tar xzvf /mnt/ebs/data/gis/upload/fonts.tar.gz -C /mnt/ebs/data/gis/mapserver/fonts
#tar xzvf /mnt/ebs/data/upload/shapefiles.tar.gz -C /mnt/ebs/data/mapa/shp
fi
}
function config.mapa.dir () {
mkdir /mnt/ebs/mapa/sp
cd /mnt/ebs/sp
tar xzvf /mnt/ebs/upload/mapa.tar.gz
cd mapa
arg sp
}
| escribano/old-setup | lib/data.sh | Shell | lgpl-3.0 | 1,794 |
mkdir ./build
pushd ./build
#TODO: 32 bit build
#echo building 32 bit version...
echo building 64-bit version...
mkdir ./x64
pushd ./x64
g++ -c -I../../src ../../src/cogmem/cogmem.cxx -o ./cogmem.o
ar rcs ./libcogmem.a ./cogmem.o
rm ./cogmem.o
popd
popd
| krakencalamari/cogfw | build.sh | Shell | unlicense | 261 |
#!/bin/sh
if test $# -lt 2; then echo "Usage: $0 confdir target"; exit 1; fi;
confdir=$1;
targetdir=$2;
| thetaepsilon/nftfirewall | scripts/template.sh | Shell | unlicense | 105 |
#!/bin/bash
echo ""
echo "******************************************"
echo "PcapPlusPlus Android configuration script "
echo "******************************************"
echo ""
# set Script Name variable
SCRIPT=`basename ${BASH_SOURCE[0]}`
# help function
function HELP {
echo -e \\n"Help documentation for ${SCRIPT}."\\n
echo ""
echo -e "Basic usage: $SCRIPT [-h] [--ndk-path] [--target] [--api] [--libpcap-include-dir] [--libpcap-lib-dir]"\\n
echo "The following switches are recognized:"
echo "--ndk-path --The path of Android NDK, for example: '/opt/Android/Sdk/ndk/22.0.7026061'"
echo "--target --Target architecture which must be one of these values:"
echo " - arm64-v8a"
echo " - armeabi-v7a"
echo " - x86"
echo " - x86_64"
echo "--api --Android API level. Must be between 21 and 30. If not provided, the default value is 29"
echo "--libpcap-include-dir --libpcap header files directory"
echo "--libpcap-lib-dir --libpcap pre compiled lib directory. Please make sure libpcap was compiled with the"
echo " same architecture and API level"
echo "--help|-h --Displays this help message and exits. No further actions are performed"\\n
echo ""
}
function TRANSLATE_TARGET {
if [ "$1" == "arm64-v8a" ]; then
echo "aarch64-linux-android"
return 0
fi
if [ "$1" == "armeabi-v7a" ]; then
echo "armv7a-linux-androideabi"
return 0
fi
if [ "$1" == "x86" ]; then
echo "i686-linux-android"
return 0
fi
if [ "$1" == "x86_64" ]; then
echo "x86_64-linux-android"
return 0
fi
}
if [ $# -eq 0 ]; then
HELP
exit 1
fi
# these are all the possible switches
OPTS=`getopt -o h --long ndk-path:,target:,api:,libpcap-lib-dir:,libpcap-include-dir: -- "$@"`
# if user put an illegal switch - print HELP and exit
if [ $? -ne 0 ]; then
HELP
exit 1
fi
eval set -- "$OPTS"
# Android-specific variables
NDK_PATH=""
TARGET=""
API=29
# initializing libpcap include/lib dirs to an empty string
LIBPCAP_INLCUDE_DIR=""
LIBPCAP_LIB_DIR=""
# go over all switches
while true ; do
case "$1" in
# NDK path
--ndk-path)
NDK_PATH=$2
if [ ! -d "$NDK_PATH" ]; then
echo "NDK directory '$NDK_PATH' not found. Exiting..."
exit 1
fi
shift 2 ;;
# Target
--target)
case "$2" in
arm64-v8a|armeabi-v7a|x86|x86_64)
;;
*)
echo -e \\n"Target must be one of:\n- arm64-v8a\n- armeabi-v7a\n- x86\n- x86_64\nExisting...\n"
exit 1
esac
TARGET=$(TRANSLATE_TARGET $2)
shift 2 ;;
# API version
--api)
API=$2
if [[ "$API" -ge 21 && "$API" -le 30 ]]; then
API=$2
else
echo -e \\n"API version must be between 21 and 30. Existing...\n"
exit 1
fi
shift 2 ;;
# libpcap include dir
--libpcap-include-dir)
LIBPCAP_INLCUDE_DIR=$2
shift 2 ;;
# libpcap binaries dir
--libpcap-lib-dir)
LIBPCAP_LIB_DIR=$2
shift 2 ;;
# help switch - display help and exit
-h|--help)
HELP
exit 0
;;
# empty switch - just go on
--)
shift ; break ;;
# illegal switch
*)
echo -e \\n"Option -$OPTARG not allowed."
HELP
exit 1
esac
done
if [ -z "$NDK_PATH" ]; then
echo "Please specify the NDK path using with '--ndk-path'. Exiting..."
exit 1
fi
if [ -z "$TARGET" ]; then
echo "Please specify the target using wity '--target'. Exiting..."
exit 1
fi
if [ -z "$LIBPCAP_INLCUDE_DIR" ]; then
echo "Please specify the location of libpcap header files with '--libpcap-include-dir'. Exiting..."
exit 1
fi
if [ -z "$LIBPCAP_LIB_DIR" ]; then
echo "Please specify the location of libpcap binary that matches the reqruied arch with '--libpcap-lib-dir'. Exiting..."
exit 1
fi
PLATFORM_MK="mk/platform.mk"
PCAPPLUSPLUS_MK="mk/PcapPlusPlus.mk"
# copy the basic Android platform.mk
cp -f mk/platform.mk.android $PLATFORM_MK
# copy the common (all platforms) PcapPlusPlus.mk
cp -f mk/PcapPlusPlus.mk.common $PCAPPLUSPLUS_MK
# add the Android definitions to PcapPlusPlus.mk
cat mk/PcapPlusPlus.mk.android >> $PCAPPLUSPLUS_MK
# set current directory as PCAPPLUSPLUS_HOME in platform.mk
echo -e "\nPCAPPLUSPLUS_HOME := "$PWD >> $PLATFORM_MK
# set target variable in PcapPlusPlus.mk
sed -i "1s|^|ANDROID_TARGET := $TARGET$API\n\n|" $PCAPPLUSPLUS_MK
# set NDK path variable in PcapPlusPlus.mk
sed -i "1s|^|ANDROID_NDK_PATH := $NDK_PATH\n\n|" $PCAPPLUSPLUS_MK
# set current directory as PCAPPLUSPLUS_HOME in PcapPlusPlus.mk
sed -i "1s|^|PCAPPLUSPLUS_HOME := $PWD\n\n|" $PCAPPLUSPLUS_MK
# set target variable in platform.mk
sed -i "1s|^|ANDROID_TARGET := $TARGET$API\n\n|" $PLATFORM_MK
# set NDK path variable in platform.mk
sed -i "1s|^|ANDROID_NDK_PATH := $NDK_PATH\n\n|" $PLATFORM_MK
# set libpcap include dir
echo -e "LIBPCAP_INLCUDE_DIR := $LIBPCAP_INLCUDE_DIR" >> $PCAPPLUSPLUS_MK
echo -e "PCAPPP_INCLUDES += -I\$(LIBPCAP_INLCUDE_DIR)\n" >> $PCAPPLUSPLUS_MK
# set libpcap lib dir
echo -e "LIBPCAP_LIB_DIR := $LIBPCAP_LIB_DIR" >> $PCAPPLUSPLUS_MK
echo -e "PCAPPP_LIBS_DIR += -L\$(LIBPCAP_LIB_DIR)\n" >> $PCAPPLUSPLUS_MK
# finished setup script
echo "PcapPlusPlus configuration is complete. Files created (or modified): $PLATFORM_MK, $PCAPPLUSPLUS_MK"
| seladb/PcapPlusPlus | configure-android.sh | Shell | unlicense | 5,645 |
#!/bin/bash
# Copyright 2015 The QingYuan Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for debian os distro
# $1: if 'true', we're building a master yaml, else a node
function build-qing-env {
local master=$1
local file=$2
rm -f ${file}
cat >$file <<EOF
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
SERVER_BINARY_TAR_HASH: $(yaml-quote ${SERVER_BINARY_TAR_HASH})
SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
SALT_TAR_HASH: $(yaml-quote ${SALT_TAR_HASH})
SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
QINGYUAN_MASTER_NAME: $(yaml-quote ${MASTER_NAME})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false})
ENABLE_CLUSTER_LOGGING: $(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
QINGLET_TOKEN: $(yaml-quote ${QINGLET_TOKEN:-})
QING_PROXY_TOKEN: $(yaml-quote ${QING_PROXY_TOKEN:-})
ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
EOF
if [ -n "${QING_APISERVER_REQUEST_TIMEOUT:-}" ]; then
cat >>$file <<EOF
QING_APISERVER_REQUEST_TIMEOUT: $(yaml-quote ${QING_APISERVER_REQUEST_TIMEOUT})
EOF
fi
if [[ "${master}" == "true" ]]; then
# Master-only env vars.
cat >>$file <<EOF
QINGYUAN_MASTER: "true"
QING_USER: $(yaml-quote ${QING_USER})
QING_PASSWORD: $(yaml-quote ${QING_PASSWORD})
QING_BEARER_TOKEN: $(yaml-quote ${QING_BEARER_TOKEN})
MASTER_CERT: $(yaml-quote ${MASTER_CERT_BASE64:-})
MASTER_KEY: $(yaml-quote ${MASTER_KEY_BASE64:-})
QINGCFG_CERT: $(yaml-quote ${QINGCFG_CERT_BASE64:-})
QINGCFG_KEY: $(yaml-quote ${QINGCFG_KEY_BASE64:-})
EOF
else
# Node-only env vars.
cat >>$file <<EOF
QINGYUAN_MASTER: "false"
ZONE: $(yaml-quote ${ZONE})
EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS:-})
ENABLE_DOCKER_REGISTRY_CACHE: $(yaml-quote ${ENABLE_DOCKER_REGISTRY_CACHE:-false})
QINGLET_CERT: $(yaml-quote ${QINGLET_CERT_BASE64:-})
QINGLET_KEY: $(yaml-quote ${QINGLET_KEY_BASE64:-})
EOF
fi
}
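# Example calls (hypothetical file paths), illustrating how this helper is presumably
# invoked by the surrounding write-*-env machinery:
#   build-qing-env true "${QING_TEMP}/master-qing-env.yaml"
#   build-qing-env false "${QING_TEMP}/node-qing-env.yaml"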
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
#
function create-master-instance {
local address_opt=""
[[ -n ${1:-} ]] && address_opt="--address ${1}"
write-master-env
gcloud compute instances create "${MASTER_NAME}" \
${address_opt} \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--network "${NETWORK}" \
--scopes "storage-ro,compute-rw" \
--can-ip-forward \
--metadata-from-file \
"startup-script=${QING_ROOT}/cluster/gce/configure-vm.sh,qing-env=${QING_TEMP}/master-qing-env.yaml" \
--disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
}
# TODO(mbforbes): Make $1 required.
# TODO(mbforbes): Document required vars (for this and call chain).
# $1 version
function create-node-instance-template {
local suffix=""
if [[ -n ${1:-} ]]; then
suffix="-${1}"
fi
create-node-template "${NODE_INSTANCE_PREFIX}-template${suffix}" "${scope_flags[*]}" \
"startup-script=${QING_ROOT}/cluster/gce/configure-vm.sh" \
"qing-env=${QING_TEMP}/node-qing-env.yaml"
}
| qingyuancloud/qingyuan | cluster/gce/debian/helper.sh | Shell | apache-2.0 | 4,994 |
#!/bin/sh
set -o xtrace
pumba netem delay --help
read -p "Press enter to continue"
pumba --log-level=info --interval=20s netem --tc-image=gaiadocker/iproute2 --duration=10s delay --time=3000 --jitter=20 ping
| gaia-adm/pumba | examples/pumba_delay.sh | Shell | apache-2.0 | 211 |
#!/bin/bash
set -e
SOURCE_PATH="$(realpath "${BASH_SOURCE[0]}")"
PROJECT_DIR="$(dirname $SOURCE_PATH)/../.."
function regen() {
saved_dir="$PWD"
cd "$PROJECT_DIR"
./go-build.sh
# regen CLI partials, pages and sidebar
HOME='~' werf docs --log-terminal-width=100
cd "$saved_dir"
}
function create_documentation_sidebar() {
documentation_path="$PROJECT_DIR/docs/documentation/_data/sidebars/documentation.yml"
cli_partial_path="$PROJECT_DIR/docs/documentation/_data/sidebars/_cli.yml"
documentation_partial_path="$PROJECT_DIR/docs/documentation/_data/sidebars/_documentation.yml"
cat << EOF > "$documentation_path"
# This file is generated by "regen.sh" command.
# DO NOT EDIT!
# This is your sidebar TOC. The sidebar code loops through sections here and provides the appropriate formatting.
EOF
cat "$cli_partial_path" >> "$documentation_path"
echo -e "\n" >> "$documentation_path"
cat "$documentation_partial_path" >> "$documentation_path"
}
regen
create_documentation_sidebar
| flant/dapp | docs/documentation/regen.sh | Shell | apache-2.0 | 1,012 |