code (stringlengths 2-1.05M) | repo_name (stringlengths 5-110) | path (stringlengths 3-922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2-1.05M)
---|---|---|---|---|---
#!/bin/bash
SRC=src/client/tacion.js
DST=src/client/tacion.min.js
# minifies tacion and gives it a spiffy header
uglifyjs $SRC | tr '\n' ' ' | \
sed 's/\* \* Tacion v/ Tacion v/g' | \
sed 's/ \* A.*\*\//| GPLv2 + MIT | http:\/\/azof.fr\/tacionjs *\/ /g' \
> $DST
| azoff/tacion.js | build.sh | Shell | mit | 266 |
#!/bin/sh
echo "apiserver address $1"
echo "join token $2"
echo "kubernetes version $3"
sudo kubeadm init --apiserver-advertise-address $1 --token $2 --kubernetes-version $3
# move kubecfg file to home folder
mkdir -p $HOME/.kube
sudo cp -rf /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# install weave network plugin
# https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#pod-network
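# (the kubectl version output is base64-encoded into the query string below so the
# Weave endpoint can return a manifest matching this cluster's Kubernetes version)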
export kubever=$(kubectl version | base64 | tr -d '\n')
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever"
# copy kubecfg file to shared vagrant directory
cp -rf $HOME/.kube/config /vagrant/k8s-vagrant
| HagaiBarel/k8s-vagrant | master-setup.sh | Shell | mit | 681 |
#!/usr/bin/env bash
#
# -*- Mode: sh; coding: utf-8; indent-tabs-mode: nil; tab-width: 2 -*-
# vim:set expandtab tabstop=2 fenc=utf-8 fileformat=unix filetype=sh:
#
# Performs some adjustments for the Finder
system_type=$(uname -s)
# Show Hidden Files
dotfiles_install::darwin_finder::show_dotfiles() {
defaults write com.apple.finder AppleShowAllFiles -bool true
}
# Show All File Extensions
dotfiles_install::darwin_finder::show_all_file_extensions() {
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
}
# Disable Creation of Metadata Files on Network Volumes
# Avoids creation of .DS_Store and AppleDouble files.
# https://github.com/herrbischoff/awesome-osx-command-line#disable-creation-of-metadata-files-on-network-volumes
dotfiles_install::darwin_finder::disable_metadata_on_network_volumes() {
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
}
# Disable Creation of Metadata Files on USB Volumes
# Avoids creation of .DS_Store and AppleDouble files.
# https://github.com/herrbischoff/awesome-osx-command-line#disable-creation-of-metadata-files-on-usb-volumes
dotfiles_install::darwin_finder::disable_metadata_on_usb_volumes() {
defaults write com.apple.desktopservices DSDontWriteUSBStores -bool true
}
dotfiles_install::darwin_finder() {
if [ "$system_type" = "Darwin" ]; then
dotfiles_install::darwin_finder::show_dotfiles
dotfiles_install::darwin_finder::show_all_file_extensions
dotfiles_install::darwin_finder::disable_metadata_on_network_volumes
dotfiles_install::darwin_finder::disable_metadata_on_usb_volumes
killall Finder
else
echo "[skip] Adjusting Finder settings is available only on macOS machines."
fi
}
| dreadwarrior/dotfiles | .local/bin/dotfiles-install/darwin_finder.sh | Shell | mit | 1,711 |
#!/bin/sh
#SBATCH -J CS381VpROJ # Job name
#SBATCH -o experiment.out # Specify stdout output file (%j expands to jobId)
#SBATCH -p gpu # Queue name
#SBATCH -N 1 # Total number of nodes requested (16 cores/node)
#SBATCH -n 1 # Total number of tasks
#SBATCH -t 12:00:00 # Run time (hh:mm:ss) - 12 hours
#SBATCH -A CS381V-Visual-Recogn # Specify allocation to charge against
echo $@
# th neural_style.lua -content_image myimg/me.jpg -style_image myimg/horse.jpg -backend cudnn -seed 200 -save_iter 500 -style_weight ${style} -image_size 1000 -content_layers relu4_2 -face_layer relu3_2 -face_weight ${facew} -output_image style_${style}_relu42_face_relu32_facew${facew}.png
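# NOTE: style, style_pic, facew and face_layer are never set in this script;
# they are presumably exported in the environment before it is submitted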
for i in $(seq 1 70); do
content_name=$(printf "IMG%02d" "$i")
th neural_style.lua -content_image evaldata/${content_name}.jpg -style_image evaldata/style/${style_pic}.jpg -backend cudnn -seed 200 -save_iter -1 -style_weight ${style} -image_size 800 -content_layers relu4_2 -face_layer ${face_layer} -face_weight ${facew} -output_image evaldata/out/${content_name}/${style_pic}_style${style}_facew${facew}_${face_layer}.png
done
| zerolocker/neural-style | cmd.sh | Shell | mit | 1,196 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
local source="${BUILT_PRODUCTS_DIR}/Pods-SMSegmentView_Tests/$1"
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source=$(readlink "${source}")
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers/" --filter "- PrivateHeaders/" --filter "- Modules/" ${source} ${destination}"
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers/" --filter "- PrivateHeaders/" --filter "- Modules/" "${source}" "${destination}"
# Resign the code if required by the build settings to avoid unstable apps
if [ "${CODE_SIGNING_REQUIRED}" == "YES" ]; then
code_sign "${destination}/$1"
fi
# Embed linked Swift runtime libraries
local basename
basename=$(echo $1 | sed -E s/\\..+// && exit ${PIPESTATUS[0]})
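# e.g. "SMSegmentView.framework" -> "SMSegmentView" (the sed strips everything from the first dot onward)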
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/$1/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
if [ "${CODE_SIGNING_REQUIRED}" == "YES" ]; then
code_sign "${destination}/${lib}"
fi
done
}
# Signs a framework with the provided identity
code_sign() {
# Use the current code sign identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements $1"
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework 'SMSegmentView.framework'
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework 'SMSegmentView.framework'
fi
| trungung/SMSegmentView | Example/Pods/Target Support Files/Pods-SMSegmentView_Tests/Pods-SMSegmentView_Tests-frameworks.sh | Shell | mit | 2,367 |
#!/bin/bash
#
# Cleanroom Project release generator script
#
# by emaloney, 7 June 2015
#
set -o pipefail # to ensure xcodebuild pipeline errors are propagated correctly
SCRIPT_NAME=$(basename "$0")
SCRIPT_DIR=$(cd "$PWD" ; cd `dirname "$0"` ; echo "$PWD")
source "${SCRIPT_DIR}/include-common.sh"
showHelp()
{
define HELP <<HELP
$SCRIPT_NAME
Issues a new release of the project contained in this git repository.
Usage:
$SCRIPT_NAME <release-type> [...]
Where:
<release-type> is 'major', 'minor' or 'patch', depending on which portion
of the version number should be incremented for this release.
Optional arguments:
--set-version <version>
Make <version> the version number being released
--auto
Run automatically without awaiting user confirmation
--tag
Tags the repo with the version number upon success
--push
Push all changes upon success
--amend
Causes any commits to amend the previous commit
--branch <branch>
Specifies <branch> be used as the git branch for operations
--commit-message-file <file>
Specifies the contents <file> should be used as the commit message
--no-commit
Skips committing any changes; implies --no-tag
--no-tag
Overrides --tag if specified; --no-tag is the default
--stash-dirty-files
Stashes dirty files before attempting to release
--commit-dirty-files
Commits dirty files before attempting to release
--ignore-dirty-files
Ignores dirty files; implies --no-commit --no-tag
--skip-docs
Skips generating the documentation
--skip-tests
Skips running the unit tests :-(
--quiet
Silences output
--summarize
Minimizes output (ideal for invoking from other scripts)
Further detail can be found below.
How it works
By default, the script inspects the appropriate property list file(s)
to determine the current version of the project. The script then
increments the version number according to the release type
specified:
major — When the major release type is specified, the major version
component is incremented, and both the minor and patch
components are reset to zero. 2.1.3 becomes 3.0.0.
minor — When the minor release type is specified, the major version
component is not changed, while the minor component is
incremented and patch component is reset to zero.
2.1.3 becomes 2.2.0.
patch — When the patch release type is specified, the major and minor
version components remain unchanged, while the patch component
is incremented. 2.1.3 becomes 2.1.4.
The script then updates all necessary references to the version
elsewhere in the project.
Then, the API documentation is rebuilt, and the repository is tagged
with the appropriate version number for the release.
Finally, if the --push argument was supplied, the entire release is
pushed to the repo's origin remote.
Specifying the version explicitly
The --set-version argument can be supplied along with a version number
if you wish to specify the exact version number to use.
The version number is expected to contain exactly three integer
components separated by periods; trailing zeros are used if
necessary.
If you wanted to set a release version of 4.2.1, for example, you
could call the script as follows:
$SCRIPT_NAME --set-version 4.2.1
NOTE: When the --set-version argument is supplied, the release-type
argument does not need to be specified (and it will be ignored
if it is).
User Confirmation
By default, this script requires user confirmation before making
any changes.
To allow this script to be invoked by other scripts, an automated
mode is also supported.
When this script is run in automated mode, the user will not be
asked to confirm any actions; all actions are performed immediately.
To enable automated mode, supply the --auto argument.
Releasing with uncommitted changes
Normally, this script will refuse to continue if the repository
is dirty; that is, if there are any modified files that haven't
yet been committed.
However, you can force a release to be issued from a dirty repo
using either the --stash-dirty-files or the --commit-dirty-files
argument.
The --stash-dirty-files option causes a git stash operation to
occur at the start of the release process, and a stash pop at the
end. This safely moves the dirty files out of the way when the
script is doing its thing, and restores them when it is done.
The --commit-dirty-files option causes the dirty files to be
committed along with the other changes that occur during the
release process.
In addition, an --ignore-dirty-files option is available, which
lets you go through the entire release process, but stops short
of committing and tagging. This allows you to run through the
entire release process without actually committing anything.
Note that these options are mutually exclusive and may not be
used with each other.
Help
This documentation is displayed when supplying the --help (or -help, -h,
or -?) argument.
Note that when this script displays help documentation, all other
command line arguments are ignored and no other actions are performed.
HELP
printf "$HELP" | less
}
validateVersion()
{
if [[ ! ($1 =~ ^[0-9]+\.[0-9]+\.[0-9]+$) ]]; then
exitWithErrorSuggestHelp "Expected $2 to contain three period-separated numeric components (eg., 3.6.1, 4.0.0, etc.); got $1 instead"
fi
}
cleanupDirtyStash()
{
updateStatus "Restoring previously-stashed modified files"
executeCommand "git stash pop"
}
#
# make sure we're in a git repo
#
cd "$SCRIPT_DIR/../../."
git status 2&> /dev/null
if [[ $? != 0 ]]; then
exitWithErrorSuggestHelp "You must invoke this script from within a git repo"
fi
#
# parse the command-line arguments
#
AMEND_ARGS=""
BRANCH=master
STASH_DIRTY_FILES=0
COMMIT_DIRTY_FILES=0
IGNORE_DIRTY_FILES=0
while [[ $1 ]]; do
case $1 in
--set-version)
shift
if [[ -z $1 ]]; then
exitWithErrorSuggestHelp "The $1 argument expects a value"
else
validateVersion $1 "the version passed with the --set-version argument"
SET_VERSION=$1
fi
;;
--auto|-a)
AUTOMATED_MODE=1
;;
--amend)
AMEND_ARGS="--amend --no-edit"
;;
--stash-dirty-files)
STASH_DIRTY_FILES=1
;;
--commit-dirty-files)
COMMIT_DIRTY_FILES=1
;;
--ignore-dirty-files)
IGNORE_DIRTY_FILES=1
NO_COMMIT=1
NO_TAG=1
;;
--no-commit)
NO_COMMIT=1
NO_TAG=1
;;
--no-tag)
NO_TAG=1
;;
--tag)
TAG_WHEN_DONE=1
;;
--push)
PUSH_WHEN_DONE=1
;;
--branch|-b)
if [[ $2 ]]; then
BRANCH="$2"
shift
fi
;;
--commit-message-file|-m)
if [[ $2 ]]; then
COMMIT_MESSAGE=`cat "$2"`
shift
fi
;;
--skip-docs)
SKIP_DOCUMENTATION=1
;;
--skip-tests)
SKIP_TESTS=1
;;
--quiet|-q)
QUIET=1
QUIET_ARG="-q"
;;
--summarize|-z)
SUMMARIZE=1
QUIET=1
QUIET_ARG="-q"
;;
--rebase)
REBASE=1
;;
--help|-help|-h|-\?)
SHOW_HELP=1
;;
-*)
exitWithErrorSuggestHelp "Unrecognized argument: $1"
;;
*)
if [[ -z $ARGS ]]; then
ARGS=$1
else
ARGS="$ARGS $1"
fi
esac
shift
done
if [[ $SHOW_HELP ]]; then
showHelp
exit 1
fi
for ARG in $ARGS; do
if [[ -z $RELEASE_TYPE ]]; then
RELEASE_TYPE="$ARG"
else
exitWithErrorSuggestHelp "Unrecognized argument: $ARG"
fi
done
#
# validate the input
#
if [[ $(( $STASH_DIRTY_FILES + $COMMIT_DIRTY_FILES + $IGNORE_DIRTY_FILES )) -gt 1 ]]; then
exitWithErrorSuggestHelp "The --stash-dirty-files, --commit-dirty-files and --ignore-dirty-files arguments are mutually exclusive and can't be used with each other"
fi
if [[ ! -z $RELEASE_TYPE ]]; then
if [[ ! -z $SET_VERSION ]]; then
exitWithErrorSuggestHelp "The release type can't be specified when --set-version is used"
elif [[ $RELEASE_TYPE != "major" && $RELEASE_TYPE != "minor" && $RELEASE_TYPE != "patch" ]]; then
exitWithErrorSuggestHelp "The release type argument must be one of: 'major', 'minor' or 'patch'"
fi
elif [[ -z $SET_VERSION ]]; then
if [[ -z $RELEASE_TYPE ]]; then
exitWithErrorSuggestHelp "The release type ('major', 'minor' or 'patch') must be specified as an argument."
fi
fi
#
# figure out what the current version is
#
FRAMEWORK_PLIST_FILE="Info-Target.plist"
FRAMEWORK_PLIST_PATH="$SCRIPT_DIR/../$FRAMEWORK_PLIST_FILE"
PLIST_BUDDY=/usr/libexec/PlistBuddy
CURRENT_VERSION=`$PLIST_BUDDY "$FRAMEWORK_PLIST_PATH" -c "Print :CFBundleShortVersionString"`
validateVersion "$CURRENT_VERSION" "the CFBundleShortVersionString value in the $FRAMEWORK_PLIST_FILE file"
#
# now, do the right thing depending on the command-line arguments
#
if [[ ! -z $SET_VERSION ]]; then
VERSION=$SET_VERSION
elif [[ ! -z $RELEASE_TYPE ]]; then
MAJOR_VERSION=`echo $CURRENT_VERSION | awk -F . '{print int($1)}'`
MINOR_VERSION=`echo $CURRENT_VERSION | awk -F . '{print int($2)}'`
PATCH_VERSION=`echo $CURRENT_VERSION | awk -F . '{print int($3)}'`
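# e.g. CURRENT_VERSION=2.1.3 yields MAJOR_VERSION=2, MINOR_VERSION=1, PATCH_VERSION=3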
case $RELEASE_TYPE in
major)
MAJOR_VERSION=$(( $MAJOR_VERSION + 1 ))
MINOR_VERSION=0
PATCH_VERSION=0
;;
minor)
MINOR_VERSION=$(( $MINOR_VERSION + 1 ))
PATCH_VERSION=0
;;
patch)
PATCH_VERSION=$(( $PATCH_VERSION + 1 ))
;;
esac
VERSION="${MAJOR_VERSION}.${MINOR_VERSION}.${PATCH_VERSION}"
fi
#
# try to figure out the origin repo name
#
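# (e.g. a fetch URL of https://github.com/emaloney/MBGeolocation.git yields "MBGeolocation")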
REPO_NAME=$(git remote -v | grep "^origin" | grep "(fetch)" | awk '{print $2}' | xargs basename | sed 'sq.git$qq')
if [[ -z "$REPO_NAME" ]]; then
exitWithErrorSuggestHelp "Couldn't determine repo name"
fi
#
# error out if conflicting tag flags were supplied
#
if [[ $TAG_WHEN_DONE && $NO_TAG ]]; then
exitWithErrorSuggestHelp "--tag can't be specified with --no-tag, --no-commit or --ignore-dirty-files"
fi
#
# see if we've got uncommitted changes
#
git rev-parse --quiet --verify HEAD > /dev/null
if [[ $? == 0 ]]; then
git diff-index --quiet HEAD -- ; REPO_IS_DIRTY=$?
else
# if HEAD doesn't exist (which is how we get here), then treat the
# repo as if it were dirty
REPO_IS_DIRTY=1
fi
if [[ $REPO_IS_DIRTY != 0 && $(( $STASH_DIRTY_FILES + $COMMIT_DIRTY_FILES + $IGNORE_DIRTY_FILES )) == 0 ]]; then
exitWithErrorSuggestHelp "You have uncommitted changes in this repo; won't do anything" "(use --stash-dirty-files, --commit-dirty-files or\n\t--ignore-dirty-files to bypass this error)"
fi
REPO_URL=`git remote get-url --push origin 2> /dev/null`
if [[ $? == 0 ]]; then
git ls-remote --heads $REPO_URL $BRANCH | grep "refs/heads/$BRANCH" > /dev/null
if [[ $? == 0 ]]; then
REMOTE_BRANCH_EXISTS=1
fi
fi
if [[ ! $REMOTE_BRANCH_EXISTS ]]; then
GIT_PUSH_ARGS="--set-upstream origin $BRANCH"
fi
confirmationPrompt "Releasing $REPO_NAME $VERSION (current is $CURRENT_VERSION)"
if [[ $REPO_IS_DIRTY != 0 && $STASH_DIRTY_FILES -gt 0 ]]; then
updateStatus "Stashing modified files"
executeCommand "git stash"
trap cleanupDirtyStash EXIT
fi
#
# make sure it builds
#
XCODEBUILD=/usr/bin/xcodebuild
XCODEBUILD_CMD="$XCODEBUILD"
updateStatus "Verifying that $REPO_NAME builds"
if [[ $QUIET ]]; then
XCODEBUILD_CMD="$XCODEBUILD -quiet"
fi
if [[ ! -x "$XCODEBUILD" ]]; then
exitWithErrorSuggestHelp "Expected to find xcodebuild at path $XCODEBUILD"
fi
#
# use xcpretty if it is available
#
XCODEBUILD_PIPETO=""
XCPRETTY=`which xcpretty`
if [[ $? == 0 ]]; then
XCODEBUILD_PIPETO="| $XCPRETTY"
fi
#
# determine build settings
#
PROJECT_SPECIFIER="-workspace MBGeolocation.xcworkspace"
COMPILE_PLATFORMS="iOS macOS tvOS watchOS"
PROJECT_NAME="MBGeolocation"
#
# build for each platform
#
for PLATFORM in $COMPILE_PLATFORMS; do
updateStatus "Building: $PROJECT_NAME for $PLATFORM..."
summarize "building $PROJECT_NAME for $PLATFORM"
if [[ $SKIP_TESTS ]]; then
BUILD_ACTION="clean build"
else
BUILD_ACTION="clean $(testActionForPlatform $PLATFORM)"
fi
RUN_DESTINATION="$(runDestinationForPlatform $PLATFORM)"
if [[ $QUIET ]]; then
executeCommand "$XCODEBUILD_CMD $PROJECT_SPECIFIER -scheme \"${REPO_NAME}\" -configuration Debug -destination \"$RUN_DESTINATION\" $BUILD_ACTION $XCODEBUILD_PIPETO" 2&> /dev/null
else
executeCommand "$XCODEBUILD_CMD $PROJECT_SPECIFIER -scheme \"${REPO_NAME}\" -configuration Debug -destination \"$RUN_DESTINATION\" $BUILD_ACTION $XCODEBUILD_PIPETO"
fi
done
#
# bump version numbers
#
updateStatus "Adjusting version numbers"
executeCommand "$PLIST_BUDDY \"$FRAMEWORK_PLIST_PATH\" -c \"Set :CFBundleShortVersionString $VERSION\""
agvtool bump > /dev/null
summarize "bumped version to $VERSION from $CURRENT_VERSION for $RELEASE_TYPE release"
#
# commit changes
#
BUILD_NUMBER=`agvtool vers -terse`
if [[ -z $COMMIT_MESSAGE ]]; then
COMMIT_MESSAGE="Release $VERSION (build $BUILD_NUMBER)"
if [[ $REPO_IS_DIRTY != 0 && $COMMIT_DIRTY_FILES -gt 0 ]]; then
COMMIT_MESSAGE="$COMMIT_MESSAGE -- committed with other changes"
fi
else
COMMIT_MESSAGE="[$VERSION] $COMMIT_MESSAGE"
fi
if [[ -z $NO_COMMIT ]]; then
updateStatus "Committing changes"
printf "%s" "$COMMIT_MESSAGE" | git commit -a $QUIET_ARG $AMEND_ARGS -F -
summarize "committed changes to \"$BRANCH\" branch"
else
updateStatus "! Not committing changes; --no-commit or --ignore-dirty-files was specified"
printf "> To commit manually, use:\n\n git commit -a -m '$COMMIT_MESSAGE'\n"
fi
#
# rebase with existing changes if needed
#
if [[ $REBASE && $REMOTE_BRANCH_EXISTS ]]; then
updateStatus "Rebasing with existing $BRANCH branch"
executeCommand "git pull origin $BRANCH $QUIET_ARG --rebase --allow-unrelated-histories --strategy=recursive -Xtheirs"
summarize "rebased \"$BRANCH\" branch"
fi
#
# tag repo with new version number
#
if [[ $TAG_WHEN_DONE && -z $NO_COMMIT && -z $NO_TAG ]]; then
updateStatus "Tagging repo for $VERSION release"
executeCommand "git tag -a $VERSION -m 'Release $VERSION issued by $SCRIPT_NAME'"
summarize "tagged \"$BRANCH\" branch with $VERSION"
else
updateStatus "! Not tagging repo; --tag was not specified"
printf "> To tag manually, use:\n\n git tag -a $VERSION -m 'Release $VERSION issued by $SCRIPT_NAME'\n"
fi
#
# push if we should
#
if [[ $PUSH_WHEN_DONE && -z $NO_COMMIT ]]; then
ORIGIN_URL=`git remote get-url --push origin`
updateStatus "Pushing changes to \"$BRANCH\" branch of $ORIGIN_URL"
executeCommand "git push $QUIET_ARG $GIT_PUSH_ARGS"
if [[ $TAG_WHEN_DONE && -z $NO_TAG ]]; then
executeCommand "git push --tags $QUIET_ARG"
fi
summarize "pushed changes to \"$BRANCH\" branch of $ORIGIN_URL"
else
printf "\n> REMEMBER: The release isn't done until you push the changes! Don't forget to:\n\n git push && git push --tags\n"
fi
| emaloney/MBGeolocation | BuildControl/bin/releaseMe.sh | Shell | mit | 14,436 |
#!/usr/bin/env bash
set -euo pipefail
rm -rf RxAlamofire-SPM.xcodeproj
rm -rf xcarchives/*
rm -rf RxAlamofire.xcframework.zip
rm -rf RxAlamofire.xcframework
brew bundle
xcodegen --spec project-spm.yml
xcodebuild archive -quiet -project RxAlamofire-SPM.xcodeproj -configuration Release -scheme "RxAlamofire iOS" -destination "generic/platform=iOS" -archivePath "xcarchives/RxAlamofire-iOS" SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES OTHER_CFLAGS="-fembed-bitcode" BITCODE_GENERATION_MODE="bitcode" ENABLE_BITCODE=YES | xcpretty --color --simple
xcodebuild archive -quiet -project RxAlamofire-SPM.xcodeproj -configuration Release -scheme "RxAlamofire iOS" -destination "generic/platform=iOS Simulator" -archivePath "xcarchives/RxAlamofire-iOS-Simulator" SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES OTHER_CFLAGS="-fembed-bitcode" BITCODE_GENERATION_MODE="bitcode" ENABLE_BITCODE=YES | xcpretty --color --simple
xcodebuild archive -quiet -project RxAlamofire-SPM.xcodeproj -configuration Release -scheme "RxAlamofire tvOS" -destination "generic/platform=tvOS" -archivePath "xcarchives/RxAlamofire-tvOS" SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES OTHER_CFLAGS="-fembed-bitcode" BITCODE_GENERATION_MODE="bitcode" ENABLE_BITCODE=YES | xcpretty --color --simple
xcodebuild archive -quiet -project RxAlamofire-SPM.xcodeproj -configuration Release -scheme "RxAlamofire tvOS" -destination "generic/platform=tvOS Simulator" -archivePath "xcarchives/RxAlamofire-tvOS-Simulator" SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES OTHER_CFLAGS="-fembed-bitcode" BITCODE_GENERATION_MODE="bitcode" ENABLE_BITCODE=YES | xcpretty --color --simple
xcodebuild archive -quiet -project RxAlamofire-SPM.xcodeproj -configuration Release -scheme "RxAlamofire macOS" -destination "generic/platform=macOS" -archivePath "xcarchives/RxAlamofire-macOS" SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES OTHER_CFLAGS="-fembed-bitcode" BITCODE_GENERATION_MODE="bitcode" ENABLE_BITCODE=YES | xcpretty --color --simple
xcodebuild archive -quiet -project RxAlamofire-SPM.xcodeproj -configuration Release -scheme "RxAlamofire watchOS" -destination "generic/platform=watchOS" -archivePath "xcarchives/RxAlamofire-watchOS" SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES OTHER_CFLAGS="-fembed-bitcode" BITCODE_GENERATION_MODE="bitcode" ENABLE_BITCODE=YES | xcpretty --color --simple
xcodebuild archive -quiet -project RxAlamofire-SPM.xcodeproj -configuration Release -scheme "RxAlamofire watchOS" -destination "generic/platform=watchOS Simulator" -archivePath "xcarchives/RxAlamofire-watchOS-Simulator" SKIP_INSTALL=NO BUILD_LIBRARY_FOR_DISTRIBUTION=YES OTHER_CFLAGS="-fembed-bitcode" BITCODE_GENERATION_MODE="bitcode" ENABLE_BITCODE=YES | xcpretty --color --simple
xcodebuild -create-xcframework \
-framework "xcarchives/RxAlamofire-iOS-Simulator.xcarchive/Products/Library/Frameworks/RxAlamofire.framework" \
-debug-symbols "$(pwd)/xcarchives/RxAlamofire-iOS-Simulator.xcarchive/dSYMs/RxAlamofire.framework.dSYM" \
-framework "xcarchives/RxAlamofire-iOS.xcarchive/Products/Library/Frameworks/RxAlamofire.framework" \
-debug-symbols "$(pwd)/xcarchives/RxAlamofire-iOS.xcarchive/dSYMs/RxAlamofire.framework.dSYM" \
-framework "xcarchives/RxAlamofire-tvOS-Simulator.xcarchive/Products/Library/Frameworks/RxAlamofire.framework" \
-debug-symbols "$(pwd)/xcarchives/RxAlamofire-tvOS-Simulator.xcarchive/dSYMs/RxAlamofire.framework.dSYM" \
-framework "xcarchives/RxAlamofire-tvOS.xcarchive/Products/Library/Frameworks/RxAlamofire.framework" \
-debug-symbols "$(pwd)/xcarchives/RxAlamofire-tvOS.xcarchive/dSYMs/RxAlamofire.framework.dSYM" \
-framework "xcarchives/RxAlamofire-macOS.xcarchive/Products/Library/Frameworks/RxAlamofire.framework" \
-debug-symbols "$(pwd)/xcarchives/RxAlamofire-macOS.xcarchive/dSYMs/RxAlamofire.framework.dSYM" \
-framework "xcarchives/RxAlamofire-watchOS-Simulator.xcarchive/Products/Library/Frameworks/RxAlamofire.framework" \
-debug-symbols "$(pwd)/xcarchives/RxAlamofire-watchOS-Simulator.xcarchive/dSYMs/RxAlamofire.framework.dSYM" \
-framework "xcarchives/RxAlamofire-watchOS.xcarchive/Products/Library/Frameworks/RxAlamofire.framework" \
-debug-symbols "$(pwd)/xcarchives/RxAlamofire-watchOS.xcarchive/dSYMs/RxAlamofire.framework.dSYM" \
-output "RxAlamofire.xcframework"
zip -r RxAlamofire.xcframework.zip RxAlamofire.xcframework
rm -rf xcarchives/*
rm -rf RxAlamofire.xcframework
rm -rf RxAlamofire-SPM.xcodeproj
| RxSwiftCommunity/RxAlamofire | scripts/xcframeworks.sh | Shell | mit | 4,472 |
#!/bin/sh
if [ $# -ne 4 ]
then
echo "Usage: $0 <source> <start> <end> <width>" 1>&2
exit 2
fi
SOURCE="$1"
START="$2"
END="$3"
WIDTH="$4"
LENGTH=`expr $END - $START`
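# each output line shows WIDTH hex bytes, a " : " separator, then the same bytes as printable characters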
hexdump -s $START -n $LENGTH -e $WIDTH'/1 "%02X "' -e '" : "' -e '"%_p"' -e '"\n"' $SOURCE
| forte916/hello_world | bash/hexdump.sh | Shell | mit | 269 |
#!/bin/sh
cd /home/wwwroot/www.reactshare.cn/air/images
convert -delay 100 -loop 0 *.png animation.gif
convert -delay 20 -loop 0 *.png animation-fast.gif
# Output video
#cat *.png | ffmpeg -f image2pipe -framerate 10 -i - output2.mkv
| raywill/crawl_air | animate.sh | Shell | mit | 238 |
#!/bin/bash
for i in *.tsp
do
for j in $(seq 1 10)
do
echo "$i" >> tspOptimized.Bench
./tsp -f $i -d >> tspOptimized.Bench
done
done
| heineman/algorithms-nutshell-2ed | Code/Graph/SingleSourceShortestPath/Tables/DeltaDense.sh | Shell | mit | 167 |
cf cups config-service -p '{"uri":"http://config-server.cf.demo.local/"}'
cf cups service-registry -p '{"uri":"http://eureka.cf.demo.local/"}'
cf cs p-mysql 100mb-dev fortune-db
| Sbodiu-pivotal/cloud-lab | labs/fortune-teller/create_services-local.sh | Shell | cc0-1.0 | 178 |
#!/bin/sh
#(c) Copyright Barry Kauler 2009, puppylinux.com
#2009 Lesser GPL licence v2 (see /usr/share/doc/legal).
#called from /usr/local/petget/installpreview.sh or check_deps.sh
#/tmp/petget_proc/petget_pkg_name_aliases_patterns is written by pkg_chooser.sh.
#passed param is a list of dependencies (DB_dependencies field of the pkg database).
#results format, see comment end of this script.
#100126 handle PKG_NAME_IGNORE variable from file PKGS_MANAGEMENT.
#100711 fix handling of PKG_NAME_ALIASES variable (defined in PKGS_MANAGEMENT file).
#110706 finding missing dependencies fix (running mageia 1).
#110722 versioning info added to dependencies.
#110822 versioning operators can be chained, ex: +linux_kernel&ge2.6.32<2.6.33
#120822 in precise puppy have a pet 'cups' instead of the ubuntu debs. the latter are various pkgs, including 'libcups2'. we don't want libcups2 showing up as a missing dependency, so have to screen these alternative names out. see also pkg_chooser.sh
#120904 bugfix, was very slow.
DB_dependencies="$1" #in standard format of the package database, field 9.
. /etc/DISTRO_SPECS #has DISTRO_BINARY_COMPAT, DISTRO_COMPAT_VERSION
. /root/.packages/DISTRO_PKGS_SPECS #has PKGS_SPECS_TABLE
. /root/.packages/PKGS_MANAGEMENT #has PKG_ALIASES_INSTALLED
#110722 versioning info added to dependencies...
#the dependencies field can now have &ge, >, &eq, &le, <
#ex1: |+ncurses,+readline&ge2.3.5,+glibc|
#chained operators allowed: ex2: |+ncurses,+readline&ge2.3.5<2.3.6,+glibc|
#if you want a package to be kernel version sensitive:
#ex3: |+ncurses,+readline,+glibc,+linux_kernel&ge2.6.39|
#make pkg deps into patterns...
( #> /tmp/petget_proc/petget_pkg_deps_patterns
#ex: DB_dependencies='+ncurses,+readline,+glibc,+linux_kernel&ge2.6.39'
for i in ${DB_dependencies//,/ } #convert ',' to ' '
do
i=${i#+} #remove leading '+'
i="${i%%&*}" #chop off any versioning info
echo "|${i}|" #e.g.: |ncurses|
done
) > /tmp/petget_proc/petget_pkg_deps_patterns
#same as above, but with versioning info...
( #> /tmp/petget_proc/petget_pkg_deps_patterns_with_versioning
for i in ${DB_dependencies//,/ } #convert ',' to ' '
do
i=${i#+} #remove leading '+'
i="${i//&/|}" #convert '&' to '|'
echo "|${i}|" #e.g.: |linux_kernel|ge2.6.39|
done
) > /tmp/petget_proc/petget_pkg_deps_patterns_with_versioning
#110706 mageia, a dep "libdbus-glib-1_2" must be located in variable PKG_ALIASES_INSTALLED (in file PKGS_MANAGEMENT)...
#/tmp/petget_proc/petget_pkg_name_aliases_patterns[_raw] created in check_deps.sh
for ONEALIAS in `cat /tmp/petget_proc/petget_pkg_name_aliases_patterns_raw | tr '\n' ' ' | tr ',' ' '` #ex: |cxxlibs|,|glibc.*|,|libc\-.*|
do
FNDDEPSPTNS="`grep "$ONEALIAS" /tmp/petget_proc/petget_pkg_deps_patterns`"
if [ "$FNDDEPSPTNS" != "" ];then
echo "$FNDDEPSPTNS" >> /tmp/petget_proc/petget_pkg_name_aliases_patterns
fi
done
#need patterns of all installed pkgs...
#100711 /tmp/petget_proc/petget_installed_patterns_system is created in pkg_chooser.sh.
cp -f /tmp/petget_proc/petget_installed_patterns_system /tmp/petget_proc/petget_installed_patterns_all
if [ -s /root/.packages/user-installed-packages ];then
INSTALLED_PATTERNS_USER="`cat /root/.packages/user-installed-packages | cut -f 2 -d '|' | sed -e 's%^%|%' -e 's%$%|%' -e 's%\\-%\\\\-%g'`"
echo "$INSTALLED_PATTERNS_USER" >> /tmp/petget_proc/petget_installed_patterns_all
#120822 this code also in pkg_chooser.sh, find alt deb names...
case $DISTRO_BINARY_COMPAT in
ubuntu|debian|devuan|raspbian)
#120904 bugfix, was very slow...
MODIF1=`stat -c %Y /root/.packages/user-installed-packages` #seconds since epoch.
MODIF2=0
[ -f /var/local/petget/installed_alt_ptns_pet_user ] && MODIF2=`stat -c %Y /var/local/petget/installed_alt_ptns_pet_user`
if [ $MODIF1 -gt $MODIF2 ];then
INSTALLED_PTNS_PET="$(grep '\.pet|' /root/.packages/user-installed-packages | cut -f 2 -d '|')"
if [ "$INSTALLED_PTNS_PET" != "" ];then
xINSTALLED_PTNS_PET="$(echo "$INSTALLED_PTNS_PET" | sed -e 's%^%/%' -e 's%$%|%' -e 's%\-%\\-%g')"
echo "$xINSTALLED_PTNS_PET" > /tmp/petget_proc/petget/fmp_xipp
INSTALLED_ALT_NAMES="$(grep --no-filename -f /tmp/petget_proc/petget/fmp_xipp /root/.packages/Packages-${DISTRO_BINARY_COMPAT}-${DISTRO_COMPAT_VERSION}-* | cut -f 2 -d '|')"
if [ "$INSTALLED_ALT_NAMES" ];then
INSTALLED_ALT_PTNS="$(echo "$INSTALLED_ALT_NAMES" | sed -e 's%^%|%' -e 's%$%|%' -e 's%\-%\\-%g')"
echo "$INSTALLED_ALT_PTNS" > /var/local/petget/installed_alt_ptns_pet_user
echo "$INSTALLED_ALT_PTNS" >> /tmp/petget_proc/petget_installed_patterns_all
fi
fi
touch /var/local/petget/installed_alt_ptns_pet_user
else
cat /var/local/petget/installed_alt_ptns_pet_user >> /tmp/petget_proc/petget_installed_patterns_all
fi
;;
esac
fi
#add these alias names to the installed patterns...
#packages may have different names, add them to installed list...
INSTALLEDALIASES="`grep --file=/tmp/petget_proc/petget_installed_patterns_all /tmp/petget_proc/petget_pkg_name_aliases_patterns | tr ',' '\n'`"
[ "$INSTALLEDALIASES" ] && echo "$INSTALLEDALIASES" >> /tmp/petget_proc/petget_installed_patterns_all
#110706 mageia, a dep "libdbus-glib-1_2" must be located in variable PKG_ALIASES_INSTALLED (in file PKGS_MANAGEMENT)...
#/tmp/petget_proc/petget_pkg_name_aliases_patterns[_raw] created in check_deps.sh, pkg_chooser.sh
for ONEALIAS in `cat /tmp/petget_proc/petget_pkg_name_aliases_patterns_raw | tr '\n' ' ' | tr ',' ' '` #ex: |cxxlibs|,|glibc.*|,|libc\-.*|
do
FNDPTN="`grep "$ONEALIAS" /tmp/petget_proc/petget_installed_patterns_all`"
if [ "$FNDPTN" ];then
FNDDEPPTN="`grep "$ONEALIAS" /tmp/petget_proc/petget_pkg_deps_patterns`"
[ "$FNDDEPPTN" ] && echo "$FNDDEPPTN" >> /tmp/petget_proc/petget_installed_patterns_all
fi
done
#100126 some names to ignore, as most likely already installed...
#/tmp/petget_proc/petget_pkg_name_ignore_patterns is created in pkg_choose.sh
cat /tmp/petget_proc/petget_pkg_name_ignore_patterns >> /tmp/petget_proc/petget_installed_patterns_all
#clean it up...
grep -v '^$' /tmp/petget_proc/petget_installed_patterns_all > /tmp/petget_proc/petget_installed_patterns_all-tmp
mv -f /tmp/petget_proc/petget_installed_patterns_all-tmp /tmp/petget_proc/petget_installed_patterns_all
#remove installed pkgs from the list of dependencies...
MISSINGDEPS_PATTERNS="`grep --file=/tmp/petget_proc/petget_installed_patterns_all -v /tmp/petget_proc/petget_pkg_deps_patterns | grep -v '^$'`"
echo "$MISSINGDEPS_PATTERNS" > /tmp/petget_proc/petget_missingpkgs_patterns #can be read by dependencies.sh, find_deps.sh.
#notes on results:
#/tmp/petget_proc/petget_missingpkgs_patterns has a list of missing dependencies, format ex:
# |kdebase|
# |kdelibs|
# |mesa-lib|
# |qt|
#/tmp/petget_proc/petget_installed_patterns_all (read in dependencies.sh) has a list of already installed
# packages, both builtin and user-installed. One on each line, exs:
# |915resolution|
# |a52dec|
# |absvolume_puppy|
# |alsa\-lib|
# |cyrus\-sasl|
# ...notice the '-' are backslashed.
#110722
MISSINGDEPS_PATTERNS_WITHVER="`grep --file=/tmp/petget_proc/petget_missingpkgs_patterns /tmp/petget_proc/petget_pkg_deps_patterns_with_versioning | grep -v '^$'`"
echo "$MISSINGDEPS_PATTERNS_WITHVER" > /tmp/petget_proc/petget_missingpkgs_patterns_with_versioning #can be read by dependencies.sh, find_deps.sh.
#...ex each line: |kdebase|ge2.3.6|
# ex with chained operators: |kdebase|ge2.3.6|lt2.4.5|
#note, dependencies.sh currently not using this file.
#120831 npierce, jemimah: dependencies.sh now using /tmp/petget_proc/petget_missingpkgs_patterns_with_versioning.
###END###
| jamesbond3142/woof-CE | woof-code/rootfs-skeleton/usr/local/petget/findmissingpkgs.sh | Shell | gpl-2.0 | 7,689 |
#! /bin/sh -e
#just a test file ignore this
PATH=/usr/local/bin/node:/bin:/usr/bin:/sbin:/usr/sbin
DAEMON=/root/Desktop/java_n_node/update/autoupdate.js #where's ur app at?
case "$1" in
start) forever start $DAEMON ;;
stop) forever stop $DAEMON ;;
force-reload|restart)
forever restart $DAEMON ;;
*) echo "Usage: /etc/init.d/flipchans-autoupdater {start|stop|restart|force-reload}"
exit 1 ;;
esac
exit 0
| flipchan/autoupdater | forever.sh | Shell | gpl-2.0 | 404 |
#!/bin/sh
# Default build for Raspbian
ARCH="arm"
while getopts ":v:p:a:" opt; do
case $opt in
v)
VERSION=$OPTARG
;;
p)
PATCH=$OPTARG
;;
a)
ARCH=$OPTARG
;;
esac
done
BUILDDATE=$(date -I)
IMG_FILE="Volumio${VERSION}-${BUILDDATE}-sparky.img"
if [ "$ARCH" = arm ]; then
DISTRO="Raspbian"
else
DISTRO="Debian 32bit"
fi
echo "Creating Image File ${IMG_FILE} with $DISTRO rootfs"
dd if=/dev/zero of=${IMG_FILE} bs=1M count=1600
echo "Creating Image Bed"
LOOP_DEV=`sudo losetup -f --show ${IMG_FILE}`
sudo parted -s "${LOOP_DEV}" mklabel msdos
sudo parted -s "${LOOP_DEV}" mkpart primary fat32 8 71
sudo parted -s "${LOOP_DEV}" mkpart primary ext3 71 1500
sudo parted -s "${LOOP_DEV}" mkpart primary ext3 1500 100%
sudo parted -s "${LOOP_DEV}" set 1 boot on
sudo parted -s "${LOOP_DEV}" print
sudo partprobe "${LOOP_DEV}"
sudo kpartx -s -a "${LOOP_DEV}"
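# kpartx maps the image's partitions to /dev/mapper/<loopname>p1, p2 and p3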
LOOP_NAME=$(basename "${LOOP_DEV}")
BOOT_PART=/dev/mapper/${LOOP_NAME}p1
SYS_PART=/dev/mapper/${LOOP_NAME}p2
DATA_PART=/dev/mapper/${LOOP_NAME}p3
echo "Using: " ${BOOT_PART}
echo "Using: " ${SYS_PART}
echo "Using: " ${DATA_PART}
if [ ! -b "${BOOT_PART}" ]
then
echo "${BOOT_PART} doesn't exist"
exit 1
fi
echo "Creating boot and rootfs filesystems"
sudo mkfs -t vfat -n BOOT "${BOOT_PART}"
sudo mkfs -F -t ext4 -L volumio "${SYS_PART}"
sudo mkfs -F -t ext4 -L volumio_data "${DATA_PART}"
sync
echo "Preparing for the sparky kernel/ platform files"
if [ -d platform-sparky ]
then
echo "Platform folder already exists - keeping it"
# if you really want to re-clone from the repo, then delete the platform-sparky folder
else
echo "Clone all sparky files from repo"
git clone https://github.com/volumio/platform-sparky.git platform-sparky
echo "Unpack the sparky platform files"
cd platform-sparky
tar xfJ sparky.tar.xz
cd ..
fi
echo "Burning the bootloader and u-boot"
sudo dd if=platform-sparky/sparky/u-boot/bootloader.bin of=${LOOP_DEV} bs=512 seek=4097
sudo dd if=platform-sparky/sparky/u-boot/u-boot-dtb.img of=${LOOP_DEV} bs=512 seek=6144
sync
echo "Preparing for Volumio rootfs"
if [ -d /mnt ]
then
echo "/mount folder exist"
else
sudo mkdir /mnt
fi
if [ -d /mnt/volumio ]
then
echo "Volumio Temp Directory Exists - Cleaning it"
rm -rf /mnt/volumio/*
else
echo "Creating Volumio Temp Directory"
sudo mkdir /mnt/volumio
fi
echo "Creating mount point for the images partition"
sudo mkdir /mnt/volumio/images
sudo mount -t ext4 "${SYS_PART}" /mnt/volumio/images
sudo mkdir /mnt/volumio/rootfs
sudo mkdir /mnt/volumio/rootfs/boot
sudo mount -t vfat "${BOOT_PART}" /mnt/volumio/rootfs/boot
echo "Copying Volumio RootFs"
sudo cp -pdR build/$ARCH/root/* /mnt/volumio/rootfs
echo "Copying sparky boot files, kernel, modules and firmware"
sudo cp platform-sparky/sparky/boot/* /mnt/volumio/rootfs/boot
sudo cp -pdR platform-sparky/sparky/lib/modules /mnt/volumio/rootfs/lib
sudo cp -pdR platform-sparky/sparky/lib/firmware /mnt/volumio/rootfs/lib
echo "Copying DSP firmware and license from allocom dsp git"
# doing this here and not in config because cloning under chroot caused issues before
git clone http://github.com/allocom/piano-firmware allo
cp -pdR allo/lib /mnt/volumio/rootfs
sudo rm -r allo
sync
echo "Preparing to run chroot for more sparky configuration"
cp scripts/sparkyconfig.sh /mnt/volumio/rootfs
cp scripts/initramfs/init /mnt/volumio/rootfs/root
cp scripts/initramfs/mkinitramfs-custom.sh /mnt/volumio/rootfs/usr/local/sbin
#copy the scripts for updating from usb
wget -P /mnt/volumio/rootfs/root http://repo.volumio.org/Volumio2/Binaries/volumio-init-updater
mount /dev /mnt/volumio/rootfs/dev -o bind
mount /proc /mnt/volumio/rootfs/proc -t proc
mount /sys /mnt/volumio/rootfs/sys -t sysfs
echo $PATCH > /mnt/volumio/rootfs/patch
chroot /mnt/volumio/rootfs /bin/bash -x <<'EOF'
su -
/sparkyconfig.sh
EOF
#cleanup
rm /mnt/volumio/rootfs/sparkyconfig.sh /mnt/volumio/rootfs/root/init
echo "Unmounting Temp devices"
umount -l /mnt/volumio/rootfs/dev
umount -l /mnt/volumio/rootfs/proc
umount -l /mnt/volumio/rootfs/sys
echo "==> sparky device installed"
#echo "Removing temporary platform files"
#echo "(you can keep it safely as long as you're sure of no changes)"
#sudo rm -r platform-sparky
sync
echo "Preparing rootfs base for SquashFS"
if [ -d /mnt/squash ]; then
echo "Volumio SquashFS Temp Dir Exists - Cleaning it"
rm -rf /mnt/squash/*
else
echo "Creating Volumio SquashFS Temp Dir"
sudo mkdir /mnt/squash
fi
echo "Copying Volumio rootfs to Temp Dir"
cp -rp /mnt/volumio/rootfs/* /mnt/squash/
echo "Removing the Kernel"
rm -rf /mnt/squash/boot/*
echo "Creating SquashFS, removing any previous one"
rm -rf Volumio.sqsh
mksquashfs /mnt/squash/* Volumio.sqsh
echo "Squash filesystem created"
echo "Cleaning squash environment"
rm -rf /mnt/squash
#copy the squash image inside the image partition
cp Volumio.sqsh /mnt/volumio/images/volumio_current.sqsh
sync
echo "Unmounting Temp Devices"
sudo umount -l /mnt/volumio/images
sudo umount -l /mnt/volumio/rootfs/boot
echo "Cleaning build environment"
rm -rf /mnt/volumio /mnt/boot
sudo dmsetup remove_all
sudo losetup -d ${LOOP_DEV}
sync
| chrismade/Build | scripts/sparkyimage.sh | Shell | gpl-2.0 | 5,277 |
#!/bin/bash
CC=gcc-4.0
cd `dirname $0`
if [ ! -f Makefile ]; then
echo "This script must be run from the ioquake3 build directory"
exit 1
fi
# we want to use the oldest available SDK for max compatibility. However 10.4 and older
# can not build 64bit binaries, making 10.5 the minimum version. This has been tested
# with xcode 3.1 (xcode31_2199_developerdvd.dmg). It contains the 10.5 SDK and a decent
# enough gcc to actually compile ioquake3
# For PPC macs, G4's or better are required to run ioquake3.
unset X86_64_SDK
unset X86_64_CFLAGS
unset X86_64_MACOSX_VERSION_MIN
unset X86_SDK
unset X86_CFLAGS
unset X86_MACOSX_VERSION_MIN
unset PPC_SDK
unset PPC_CFLAGS
unset PPC_MACOSX_VERSION_MIN
if [ -d /Developer/SDKs/MacOSX10.5.sdk ]; then
X86_64_SDK=/Developer/SDKs/MacOSX10.5.sdk
X86_64_CFLAGS="-isysroot /Developer/SDKs/MacOSX10.5.sdk"
X86_64_MACOSX_VERSION_MIN="10.5"
X86_SDK=/Developer/SDKs/MacOSX10.5.sdk
X86_CFLAGS="-isysroot /Developer/SDKs/MacOSX10.5.sdk"
X86_MACOSX_VERSION_MIN="10.5"
PPC_SDK=/Developer/SDKs/MacOSX10.5.sdk
PPC_CFLAGS="-isysroot /Developer/SDKs/MacOSX10.5.sdk"
PPC_MACOSX_VERSION_MIN="10.5"
fi
if [ -z $X86_64_SDK ] || [ -z $X86_SDK ] || [ -z $PPC_SDK ]; then
echo "\
ERROR: This script is for building a Universal Binary. You cannot build
for a different architecture unless you have the proper Mac OS X SDKs
installed. If you just want to compile for your own system run
'make-macosx.sh' instead of this script."
exit 1
fi
echo "Building X86_64 Client/Dedicated Server against \"$X86_64_SDK\""
echo "Building X86 Client/Dedicated Server against \"$X86_SDK\""
echo "Building PPC Client/Dedicated Server against \"$PPC_SDK\""
echo
if [ "$X86_64_SDK" != "/Developer/SDKs/MacOSX10.5.sdk" ] || \
[ "$X86_SDK" != "/Developer/SDKs/MacOSX10.5.sdk" ]; then
echo "\
WARNING: in order to build a binary with maximum compatibility you must
build on Mac OS X 10.5 using Xcode 3.1 and have the MacOSX10.5
SDKs installed from the Xcode install disk Packages folder."
sleep 3
fi
# For parallel make on multicore boxes...
NCPU=`sysctl -n hw.ncpu`
# x86_64 client and server
#if [ -d build/release-release-x86_64 ]; then
# rm -r build/release-darwin-x86_64
#fi
(ARCH=x86_64 CC=gcc-4.0 CFLAGS=$X86_64_CFLAGS MACOSX_VERSION_MIN=$X86_64_MACOSX_VERSION_MIN make -j$NCPU) || exit 1;
echo;echo
# x86 client and server
#if [ -d build/release-darwin-x86 ]; then
# rm -r build/release-darwin-x86
#fi
(ARCH=x86 CC=gcc-4.0 CFLAGS=$X86_CFLAGS MACOSX_VERSION_MIN=$X86_MACOSX_VERSION_MIN make -j$NCPU) || exit 1;
echo;echo
# PPC client and server
#if [ -d build/release-darwin-ppc ]; then
# rm -r build/release-darwin-ppc
#fi
(ARCH=ppc CC=gcc-4.0 CFLAGS=$PPC_CFLAGS MACOSX_VERSION_MIN=$PPC_MACOSX_VERSION_MIN make -j$NCPU) || exit 1;
echo
# use the following shell script to build a universal application bundle
export MACOSX_DEPLOYMENT_TARGET="10.5"
"./make-macosx-app.sh" release
| wtfbbqhax/ioq3 | make-macosx-ub.sh | Shell | gpl-2.0 | 2,968 |
#!/bin/bash
# -------------------------------------------------------------------------------
# Name: register-servicedata.sh
# Description: Registers Service Metadata for the current Node on an existing
# Consul cluster.
# Author: Carlos Veira Lorenzo - cveira [at] thinkinbig.org
# Version: 1.0
# Date: 2015/01/04
# -------------------------------------------------------------------------------
# Usage: register-servicedata.sh [-delayRegistration]
# -------------------------------------------------------------------------------
# Dependencies: logger, rsyslog, curl, cat, grep, awk, consul
# /root/tmp/.bootTimeEnvironment.sh
# /root/scripts/register-servicedata.conf
# -------------------------------------------------------------------------------
DelayRegistration=60
DataReplicationWait=10
if [ ! -f /root/tmp/.bootTimeEnvironment.sh ] ; then
echo "[Register-ServiceData] ERROR: Can't find Container run-time environment."
echo "[Register-ServiceData] Metadata File: /root/tmp/.bootTimeEnvironment.sh"
echo "[Register-ServiceData] ERROR: Can't find Container run-time environment." | logger
echo "[Register-ServiceData] Metadata File: /root/tmp/.bootTimeEnvironment.sh" | logger
exit 1
else
. /root/tmp/.bootTimeEnvironment.sh
fi
if [ ! -f /root/scripts/register-servicedata.conf ] ; then
echo "[Register-ServiceData] ERROR: Can't find Service Metadata." | logger
echo "[Register-ServiceData] Metadata File: /root/scripts/register-servicedata.conf" | logger
exit 1
fi
if [ "$1" == "-delayRegistration" ] ; then
echo "[Register-ServiceData] Delayed Registration requested." | logger
echo "[Register-ServiceData] Delaying for ${DelayRegistration} seconds ..." | logger
sleep ${DelayRegistration}
fi
echo "[Register-ServiceData] Registering Service Metadata ..." | logger
IFS=$'\n'
for MetadataItem in $( cat /root/scripts/register-servicedata.conf | grep -v "#" | grep . ); do
PropertyName=$( echo $MetadataItem | awk -F "\=" '{ print $1 }' )
PropertyValue=$( echo $MetadataItem | awk -F "\=" '{ print $2 }' )
TargetURL="http://${META_NODE_IP}:8500/v1/kv${PropertyName}"
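# e.g., assuming a metadata line of the form "/service/web/port=8080" (keys starting with "/"),
# this becomes a PUT to http://${META_NODE_IP}:8500/v1/kv/service/web/port with body "8080"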
echo "[Register-ServiceData] Item: ${PropertyName}" | logger
echo "[Register-ServiceData] Value: ${PropertyValue}" | logger
echo "[Register-ServiceData] TargetURL: ${TargetURL}" | logger
curl -X PUT -s -o /dev/null -d "${PropertyValue}" "${TargetURL}"
if [ $? -eq 0 ]; then
echo "[Register-ServiceData] Service metadata registration: SUCCESSFUL" | logger
else
echo "[Register-ServiceData] Service metadata registration: FAILED" | logger
fi
done
unset IFS
echo "[Register-ServiceData] Service Metadata registration completed." | logger
echo "[Register-ServiceData] Waiting for data to replicate throughout the Cluster." | logger
echo "[Register-ServiceData] Waiting time: ${DataReplicationWait} seconds ..." | logger
sleep $DataReplicationWait
echo "[Register-ServiceData] Waiting time finalized." | logger
| cveira/docker-aux | settings/scripts/register-servicedata.sh | Shell | gpl-2.0 | 3,172 |
#! /bin/sh
# Copyright (C) 1996-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Some grep-based checks on yacc support:
#
# - Make sure intermediate .c file is built from yacc source.
# Report from Thomas Morgan.
#
# - Make sure intermediate .h file is not generated nor removed
# if (AM_)?YFLAGS do not contain '-d'.
# Requested by Jim Meyering.
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
AC_PROG_YACC
END
# Run it here once and for all, since we are not going to modify
# configure.ac anymore.
$ACLOCAL
cat > Makefile.am <<'END'
bin_PROGRAMS = zardoz
zardoz_SOURCES = zardoz.y
END
# Don't redefine several times the same variable.
cp Makefile.am Makefile.src
$AUTOMAKE -a
$FGREP 'zardoz.c' Makefile.in
# If zardoz.h IS mentioned, fail.
$FGREP 'zardoz.h' Makefile.in && exit 1
cp Makefile.src Makefile.am
echo 'AM_YFLAGS = -d' >> Makefile.am
$AUTOMAKE
$FGREP 'zardoz.c' Makefile.in
# If zardoz.h is NOT mentioned, fail.
$FGREP 'zardoz.h' Makefile.in
cp Makefile.src Makefile.am
echo 'AM_YFLAGS = ' >> Makefile.am
$AUTOMAKE
$FGREP 'zardoz.c' Makefile.in
# If zardoz.h IS mentioned, fail.
$FGREP 'zardoz.h' Makefile.in && exit 1
cp Makefile.src Makefile.am
echo 'YFLAGS = -d' >> Makefile.am
# YFLAGS is a user variable.
AUTOMAKE_fails
grep 'YFLAGS.* user variable' stderr
grep 'AM_YFLAGS.* instead' stderr
$AUTOMAKE -Wno-gnu
# If zardoz.h is NOT mentioned, fail.
$FGREP 'zardoz.h' Makefile.in
cp Makefile.src Makefile.am
echo 'YFLAGS = ' >> Makefile.am
$AUTOMAKE -Wno-gnu
# If zardoz.h IS mentioned, fail.
$FGREP 'zardoz.h' Makefile.in && exit 1
:
| pylam/automake | t/yacc-grepping.sh | Shell | gpl-2.0 | 2,194 |
convert images/OCS-194-A.png -crop 1566x311+0+0 +repage images/OCS-194-A-0.png
convert -append images/OCS-193-B-12.png images/OCS-194-A-0.png images/OCS-193-B-12.png
rm images/OCS-194-A-0.png
convert images/OCS-194-A.png -crop 1566x309+0+312 +repage images/OCS-194-A-1.png
convert images/OCS-194-A.png -crop 1566x235+0+626 +repage images/OCS-194-A-2.png
convert images/OCS-194-A.png -crop 1566x313+0+870 +repage images/OCS-194-A-3.png
convert images/OCS-194-A.png -crop 1566x227+0+1192 +repage images/OCS-194-A-4.png
convert images/OCS-194-A.png -crop 1566x307+0+1426 +repage images/OCS-194-A-5.png
convert images/OCS-194-A.png -crop 1566x471+0+1742 +repage images/OCS-194-A-6.png
convert images/OCS-194-A.png -crop 1566x309+0+2222 +repage images/OCS-194-A-7.png
convert images/OCS-194-A.png -crop 1566x455+0+2540 +repage images/OCS-194-A-8.png
convert images/OCS-194-A.png -crop 1566x309+0+3014 +repage images/OCS-194-A-9.png
convert images/OCS-194-A.png -crop 1566x473+0+3330 +repage images/OCS-194-A-10.png
convert images/OCS-194-A.png -crop 1566x317+0+3804 +repage images/OCS-194-A-11.png
convert images/OCS-194-A.png -crop 1566x391+0+4130 +repage images/OCS-194-A-12.png
#
#/OCS-194.png
convert images/OCS-194-B.png -crop 1473x151+0+0 +repage images/OCS-194-B-0.png
convert -append images/OCS-194-A-12.png images/OCS-194-B-0.png images/OCS-194-A-12.png
rm images/OCS-194-B-0.png
convert images/OCS-194-B.png -crop 1473x543+0+160 +repage images/OCS-194-B-1.png
convert images/OCS-194-B.png -crop 1473x2705+0+708 +repage images/OCS-194-B-2.png
convert images/OCS-194-B.png -crop 1473x383+0+3424 +repage images/OCS-194-B-3.png
convert images/OCS-194-B.png -crop 1473x49+0+3838 +repage images/OCS-194-B-4.png
convert images/OCS-194-B.png -crop 1473x229+0+3902 +repage images/OCS-194-B-5.png
convert images/OCS-194-B.png -crop 1473x65+0+4144 +repage images/OCS-194-B-6.png
convert images/OCS-194-B.png -crop 1473x145+0+4220 +repage images/OCS-194-B-7.png
convert images/OCS-194-B.png -crop 1473x151+0+4372 +repage images/OCS-194-B-8.png
#
#/OCS-194.png
| jonnymwalker/Staroslavjanskij-Slovar | scripts/findindents.OCS-194.sh | Shell | gpl-2.0 | 2,053 |
#!/usr/bin/env bash
#===============================================================================
#
# FILE: recurrent_nnlm.ex003.make_configs.sh
#
# USAGE: ./recurrent_nnlm.ex003.make_configs.sh
#
# DESCRIPTION:
#
# NOTES: ---
# AUTHOR: Hao Fang, [email protected]
# CREATED: 03/03/2015 18:21
# REVISION: ---
#===============================================================================
set -o nounset # Treat unset variables as an error
set -e
sys=$1
exid="recurrent_nnlm.ex003"
if [[ ${sys} == "win" ]]; then
basedir=$(pwd -P | sed 's/^\/cygdrive\/\(\w\)/\1:/g')
elif [[ ${sys} == "unix" ]]; then
basedir=$(ssli-dirname $0)
fi
cfg_datadir="${basedir}/data/babel_bp_105_llp"
cfg_trainfiles="${cfg_datadir}/train.txt"
cfg_validationfile="${cfg_datadir}/valid.txt"
cfg_vocfile="${cfg_datadir}/vocab.txt"
step="s100.basic"
config="configs/${exid}.${step}.config"
cat > ${config} <<EOF
debug = 0
trainfiles = ${cfg_trainfiles}
validationfile = ${cfg_validationfile}
vocfile = ${cfg_vocfile}
outbase = ${basedir}/expts/${exid}.${step}
shuffle-sentences = false
init-alpha = 0.1
batch-size = 1
min-improvement = 1
errorin-cutoff = 15
bptt = 1
hidden = 20
independent = true
EOF
step="s200.bias"
config="configs/${exid}.${step}.config"
cat > ${config} <<EOF
debug = 0
trainfiles = ${cfg_trainfiles}
validationfile = ${cfg_validationfile}
vocfile = ${cfg_vocfile}
outbase = ${basedir}/expts/${exid}.${step}
shuffle-sentences = false
init-alpha = 0.1
batch-size = 1
min-improvement = 1
errorin-cutoff = 15
bptt = 1
hidden = 20
globalbias = true
bias = true
independent = true
EOF
step="s800.nce"
config="configs/${exid}.${step}.config"
cat > ${config} <<EOF
debug = 0
trainfiles = ${cfg_trainfiles}
validationfile = ${cfg_validationfile}
vocfile = ${cfg_vocfile}
outbase = ${basedir}/expts/${exid}.${step}
shuffle-sentences = true
init-alpha = 1.0
batch-size = 1
min-improvement = 1
adagrad = true
errorin-cutoff = 15
bptt = 1
hidden = 20
globalbias = true
bias = true
independent = true
nce = true
nce-samples = 10
EOF
step="s850.nce_ppl"
config="configs/${exid}.${step}.config"
cat > ${config} <<EOF
debug = 0
trainfiles = ${cfg_trainfiles}
validationfile = ${cfg_validationfile}
vocfile = ${cfg_vocfile}
outbase = ${basedir}/expts/${exid}.${step}
shuffle-sentences = true
init-alpha = 1.0
batch-size = 1
min-improvement = 1
adagrad = true
errorin-cutoff = 15
bptt = 1
hidden = 20
globalbias = true
bias = true
independent = true
nce = true
nce-samples = 10
nce-ppl = true
EOF
| hao-fang/UWNeuralNetLMRepo | examples/recurrent_nnlm.ex003.make_configs.sh | Shell | gpl-2.0 | 2,571 |
#!/bin/sh
#
# Copyright (c) 2013-2021, Christian Ferrari <[email protected]>
# All rights reserved.
#
# This file is part of FLoM.
#
# FLoM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
#
# FLoM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FLoM. If not, see <http://www.gnu.org/licenses/>.
#
# Options:
# $1 ID
# $2 delay time
# $3 duration time
# $4 flom args
if test $# -lt 4
then
echo "At least four parameters must be specified"
exit 1
fi
# Triple comments are used to distinguish debug rows
# print start message
###echo -n $(date +'%s %N')
###echo " $1 starting and waiting $2 seconds"
# wait some seconds...
sleep $2
# execution with duration
###echo -n $(date +'%s %N')
echo " $1 locking for $3 seconds"
###echo "flom $4 -- sleep $3"
flom $4 -- sleep_and_echo.sh $3
EXIT_RC=$?
# print end message
###echo -n $(date +'%s %N')
echo " $1 ending"
exit $EXIT_RC
| tiian/flom | tests/scripts/flom_test_exec4.sh | Shell | gpl-2.0 | 1,272 |
#!/usr/bin/env bash
###########################################################################
# astyle.sh
# ---------------------
# Date : August 2008
# Copyright : (C) 2008 by Juergen E. Fischer
# Email : jef at norbit dot de
###########################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
###########################################################################
# sort by version option
SV=V
if [[ "$OSTYPE" =~ darwin* ]]; then
SV=n
fi
min_version="3"
astyle_version_check() {
[ $(printf "$($1 --version 2>&1 | cut -d ' ' -f4)\\n$min_version" | sort -${SV} | head -n1) = "$min_version" ]
}
for ASTYLE in ${QGISSTYLE} $(dirname "$0")/qgisstyle $(dirname "$0")/RelWithDebInfo/qgisstyle astyle
do
if type -p "$ASTYLE" >/dev/null; then
if astyle_version_check "$ASTYLE"; then
break
fi
fi
ASTYLE=
done
if [ -z "$ASTYLE" ]; then
echo "qgisstyle / astyle not found - please install astyle >= $min_version or enable WITH_ASTYLE in cmake and build" >&2
exit 1
fi
if type -p tput >/dev/null; then
elcr="$ASTYLEPROGRESS$(tput el)$(tput cr)"
else
elcr="$ASTYLEPROGRESS \\r"
fi
if ! type -p flip >/dev/null; then
if type -p dos2unix >/dev/null; then
flip() {
dos2unix -q -k "$2"
}
else
echo "flip not found" >&2
flip() {
:
}
fi
fi
if ! type -p autopep8 >/dev/null; then
echo "autopep8 not found" >&2
autopep8() {
:
}
fi
ASTYLEOPTS=$(dirname "$0")/astyle.options
if type -p cygpath >/dev/null; then
ASTYLEOPTS="$(cygpath -w "$ASTYLEOPTS")"
fi
if type -p wslpath >/dev/null; then
ASTYLEOPTS="$(wslpath -a -w "$ASTYLEOPTS")"
fi
set -e
astyleit() {
$ASTYLE --options="$ASTYLEOPTS" "$1"
modified=$1.unify_includes_modified
cp "$1" "$modified"
perl -i.sortinc -n scripts/unify_includes.pl "$modified"
scripts/doxygen_space.pl "$modified"
diff "$1" "$modified" >/dev/null || mv "$modified" "$1"
rm -f "$modified"
}
for f in "$@"; do
case "$f" in
src/plugins/grass/qtermwidget/*|external/o2/*|external/qt-unix-signals/*|external/astyle/*|external/kdbush/*|external/poly2tri/*|external/wintoast/*|external/qt3dextra-headers/*|python/ext-libs/*|ui_*.py|*.astyle|tests/testdata/*|editors/*)
echo -ne "$f skipped $elcr"
continue
;;
*.cpp|*.h|*.c|*.cxx|*.hxx|*.c++|*.h++|*.cc|*.hh|*.C|*.H|*.hpp|*.mm)
if [ -x "$f" ]; then
chmod a-x "$f"
fi
cmd=astyleit
;;
*.ui|*.qgm|*.txt|*.t2t|resources/context_help/*)
cmd=:
;;
*.py)
#cmd="autopep8 --in-place --ignore=E111,E128,E201,E202,E203,E211,E221,E222,E225,E226,E227,E231,E241,E261,E265,E272,E302,E303,E501,E701"
echo -ne "Formatting $f $elcr"
cmd="autopep8 --in-place --ignore=E261,E265,E402,E501"
;;
*.sip)
cmd="perl -i.prepare -pe 's/[\\r\\t ]+$//; s#^(\\s*)/\\*[*!]\\s*([^\\s*].*)\\s*\$#\$1/** \\u\$2\\n#;'"
;;
*)
echo -ne "$f skipped $elcr"
continue
;;
esac
if ! [ -f "$f" ]; then
echo "$f not found" >&2
continue
fi
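# strip a UTF-8 byte-order mark (0xEF 0xBB 0xBF) if the file starts with one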
if [[ -f $f && $(head -c 3 "$f") == $'\xef\xbb\xbf' ]]; then
mv "$f" "$f".bom
tail -c +4 "$f".bom > "$f"
echo "removed BOM from $f"
fi
modified=$f.flip_modified
cp "$f" "$modified"
flip -ub "$modified"
diff "$f" "$modified" >/dev/null || mv "$modified" "$f"
rm -f "$modified"
eval "$cmd '$f'"
done
|
ahuarte47/QGIS
|
scripts/astyle.sh
|
Shell
|
gpl-2.0
| 3,769 |
#!/bin/bash
# Check if user is root
if [ $(id -u) != "0" ]; then
printf "Error: You must be root to run this script!\n"
exit 1
fi
printf "\n"
printf "============================\n"
printf " PostFix V2.6.6 Install \n"
printf " copyright: www.doitphp.com \n"
printf "============================\n"
printf "\n\n"
if [ ! -d websrc ]; then
printf "Error: directory websrc not found.\n"
exit 1
fi
printf "========= check postfix install whether or not =========\n\n"
isPostFix=`service postfix status | grep 'is running' | wc -l`
if [ "$isPostFix" == "0" ]; then
yum -y remove sendmail
yum -y install postfix
yum -y install cyrus-sasl
fi
printf "\npostfix version:\n"
postconf mail_version
printf "\npostfix configure file:\n"
cat /etc/postfix/main.cf
if [ -f /etc/sasl2/smtpd.conf ]; then
printf "\nsmtpd.conf file:\n"
cat /etc/sasl2/smtpd.conf
fi
chkconfig --list | grep postfix
chkconfig --list | grep saslauthd
#configure postfix file
read -p "smtp server domain name:" domain
mv /etc/postfix/main.cf /etc/postfix/main.cf.bak
cat >/etc/postfix/main.cf<<EOF
myhostname = smtp.$domain
mydomain = $domain
myorigin = \$mydomain
inet_interfaces = all
mydestination = \$myhostname, \$mydomain
relay_domains = \$mydestination
#mynetworks = 192.168.1.0/100, 127.0.0.0/8
#home_mailbox = maildir/
mail_name = Postfix - $domain
smtp_helo_name = smtp.$domain
smtpd_banner = \$myhostname ESMTP unknow
smtpd_sasl_auth_enable = yes
smtpd_recipient_restrictions = permit_mynetworks,permit_sasl_authenticated,reject_unauth_destination
smtpd_sasl_security_options = noanonymous
bounce_queue_lifetime = 1d
maximal_queue_lifetime = 1d
message_size_limit = 15728640
local_recipient_maps =
unknown_local_recipient_reject_code = 550
EOF
service postfix stop
service postfix start
service saslauthd stop
service saslauthd start
chkconfig postfix on
chkconfig saslauthd on
alternatives --config mta
printf "\n========== Postfix install Completed! =======\n\n"
printf "============== The End. ==============\n"
|
doitphp/lnmp
|
postfix/setupShell/postfixInstall.sh
|
Shell
|
gpl-2.0
| 2,032 |
#!/bin/bash
#
# Run select tests by setting ONLY, or as arguments to the script.
# Skip specific tests by setting EXCEPT.
#
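# e.g. (illustrative invocations):
#   ONLY="1a 2" sh sanity-scrub.sh    # run only tests 1a and 2
#   EXCEPT="9" sh sanity-scrub.sh     # run everything except test 9
#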
set -e
ONLY=${ONLY:-"$*"}
#Bug number for excepting test 6705
ALWAYS_EXCEPT="$SANITY_SCRUB_EXCEPT 1c 5 10"
[ "$SLOW" = "no" ] && EXCEPT_SLOW=""
# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging
require_dsh_mds || exit 0
SAVED_MDSSIZE=${MDSSIZE}
SAVED_OSTSIZE=${OSTSIZE}
SAVED_OSTCOUNT=${OSTCOUNT}
# use small MDS + OST size to speed formatting time
# do not use too small MDSSIZE/OSTSIZE, which affect the default journal size
# 200M MDT device can guarantee uninitialized groups during the OI scrub
MDSSIZE=200000
OSTSIZE=100000
# no need too much OSTs, to reduce the format/start/stop overhead
stopall
[ $OSTCOUNT -gt 4 ] && OSTCOUNT=4
MOUNT_2=""
# build up a clean test environment.
formatall
setupall
[ $(facet_fstype $SINGLEMDS) != "ldiskfs" ] &&
skip "test OI scrub only for ldiskfs" && check_and_cleanup_lustre &&
exit 0
[ $(facet_fstype ost1) != "ldiskfs" ] &&
skip "test OI scrub only for ldiskfs" && check_and_cleanup_lustre &&
exit 0
[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.2.90) ]] &&
skip "Need MDS version at least 2.2.90" && check_and_cleanup_lustre &&
exit 0
[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.3.90) ]] &&
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 1a"
[[ $(lustre_version_code $SINGLEMDS) -le $(version_code 2.6.50) ]] &&
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 4"
[[ $(lustre_version_code $SINGLEMDS) -le $(version_code 2.4.1) ]] &&
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 15"
[[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.4.90) ]] &&
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.4.50) ]] &&
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 15"
[[ $(lustre_version_code ost1) -lt $(version_code 2.4.50) ]] &&
ALWAYS_EXCEPT="$ALWAYS_EXCEPT 11 12 13 14"
[[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.5.59) ]] &&
SCRUB_ONLY="-t scrub"
build_test_filter
MDT_DEV="${FSNAME}-MDT0000"
OST_DEV="${FSNAME}-OST0000"
MDT_DEVNAME=$(mdsdevname ${SINGLEMDS//mds/})
scrub_start() {
local error_id=$1
local n
# use "lfsck_start -A" when we no longer need testing interop
for n in $(seq $MDSCOUNT); do
do_facet mds$n $LCTL lfsck_start -M $(facet_svc mds$n) \
$SCRUB_ONLY "$@" ||
error "($error_id) Failed to start OI scrub on mds$n"
done
}
scrub_stop() {
local error_id=$1
local n
# use "lfsck_stop -A" when we no longer need testing interop
for n in $(seq $MDSCOUNT); do
do_facet mds$n $LCTL lfsck_stop -M $(facet_svc mds$n) ||
error "($error_id) Failed to stop OI scrub on mds$n"
done
}
scrub_status() {
local n=$1
do_facet mds$n $LCTL get_param -n \
osd-ldiskfs.$(facet_svc mds$n).oi_scrub
}
START_SCRUB="do_facet $SINGLEMDS $LCTL lfsck_start -M ${MDT_DEV} $SCRUB_ONLY"
START_SCRUB_ON_OST="do_facet ost1 $LCTL lfsck_start -M ${OST_DEV} $SCRUB_ONLY"
STOP_SCRUB="do_facet $SINGLEMDS $LCTL lfsck_stop -M ${MDT_DEV}"
SHOW_SCRUB="do_facet $SINGLEMDS \
$LCTL get_param -n osd-ldiskfs.${MDT_DEV}.oi_scrub"
SHOW_SCRUB_ON_OST="do_facet ost1 \
$LCTL get_param -n osd-ldiskfs.${OST_DEV}.oi_scrub"
MOUNT_OPTS_SCRUB="-o user_xattr"
MOUNT_OPTS_NOSCRUB="-o user_xattr,noscrub"
scrub_prep() {
local nfiles=$1
local n
check_mount_and_prep
echo "preparing... $(date)"
for n in $(seq $MDSCOUNT); do
echo "creating $nfiles files on mds$n"
if [ $n -eq 1 ]; then
mkdir $DIR/$tdir/mds$n ||
error "Failed to create directory mds$n"
else
$LFS mkdir -i $((n - 1)) $DIR/$tdir/mds$n ||
error "Failed to create remote directory mds$n"
fi
cp $LUSTRE/tests/*.sh $DIR/$tdir/mds$n ||
error "Failed to copy files to mds$n"
mkdir -p $DIR/$tdir/mds$n/d_$tfile ||
error "mkdir failed on mds$n"
createmany -m $DIR/$tdir/mds$n/d_$tfile/f 2 > \
/dev/null || error "create failed on mds$n"
if [[ $nfiles -gt 0 ]]; then
createmany -m $DIR/$tdir/mds$n/$tfile $nfiles > \
/dev/null || error "createmany failed on mds$n"
fi
done
echo "prepared $(date)."
cleanup_mount $MOUNT > /dev/null || error "Fail to stop client!"
for n in $(seq $MDSCOUNT); do
echo "stop mds$n"
stop mds$n > /dev/null || error "Fail to stop MDS$n!"
done
}
scrub_start_mds() {
local error_id=$1
local opts=$2
local n
for n in $(seq $MDSCOUNT); do
start mds$n $(mdsdevname $n) $opts >/dev/null ||
error "($error_id) Failed to start mds$n"
done
}
scrub_stop_mds() {
local error_id=$1
local n
for n in $(seq $MDSCOUNT); do
echo "stopping mds$n"
stop mds$n >/dev/null ||
error "($error_id) Failed to stop mds$n"
done
}
scrub_check_status() {
local error_id=$1
local expected=$2
local n
for n in $(seq $MDSCOUNT); do
wait_update_facet mds$n "$LCTL get_param -n \
osd-ldiskfs.$(facet_svc mds$n).oi_scrub |
awk '/^status/ { print \\\$2 }'" "$expected" 6 ||
error "($error_id) Expected '$expected' on mds$n"
done
}
scrub_check_flags() {
local error_id=$1
local expected=$2
local actual
local n
for n in $(seq $MDSCOUNT); do
actual=$(do_facet mds$n $LCTL get_param -n \
osd-ldiskfs.$(facet_svc mds$n).oi_scrub |
awk '/^flags/ { print $2 }')
if [ "$actual" != "$expected" ]; then
error "($error_id) Expected '$expected' on mds$n, but" \
"got '$actual'"
fi
done
}
scrub_check_params() {
local error_id=$1
local expected=$2
local actual
local n
for n in $(seq $MDSCOUNT); do
actual=$(do_facet mds$n $LCTL get_param -n \
osd-ldiskfs.$(facet_svc mds$n).oi_scrub |
awk '/^param/ { print $2 }')
if [ "$actual" != "$expected" ]; then
error "($error_id) Expected '$expected' on mds$n, but" \
"got '$actual'"
fi
done
}
scrub_check_repaired() {
local error_id=$1
local expected=$2
local actual
local n
for n in $(seq $MDSCOUNT); do
actual=$(do_facet mds$n $LCTL get_param -n \
osd-ldiskfs.$(facet_svc mds$n).oi_scrub |
awk '/^updated/ { print $2 }')
if [ $expected -eq 0 -a $actual -ne 0 ]; then
error "($error_id) Expected no repaired on mds$n, but" \
"got '$actual'"
fi
if [ $expected -ne 0 -a $actual -lt $expected ]; then
error "($error_id) Expected '$expected' on mds$n, but" \
"got '$actual'"
fi
done
}
scrub_check_data() {
local error_id=$1
local n
for n in $(seq $MDSCOUNT); do
diff -q $LUSTRE/tests/test-framework.sh \
$DIR/$tdir/mds$n/test-framework.sh ||
error "($error_id) File data check failed"
done
}
scrub_check_data2() {
local filename=$1
local error_id=$2
local n
for n in $(seq $MDSCOUNT); do
diff -q $LUSTRE/tests/$filename \
$DIR/$tdir/mds$n/$filename ||
error "($error_id) File data check failed"
done
}
scrub_remove_ois() {
local error_id=$1
local index=$2
local n
for n in $(seq $MDSCOUNT); do
mds_remove_ois mds$n $index ||
error "($error_id) Failed to remove OI .$index on mds$n"
done
}
scrub_backup_restore() {
local error_id=$1
local igif=$2
local n
for n in $(seq $MDSCOUNT); do
mds_backup_restore mds$n $igif ||
error "(error_id) Backup/restore on mds$n failed"
done
}
scrub_enable_auto() {
do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param -n \
osd-ldiskfs.*.auto_scrub=1
}
full_scrub_ratio() {
[[ $(lustre_version_code $SINGLEMDS) -le $(version_code 2.6.50) ]] &&
return
local ratio=$1
do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param -n \
osd-ldiskfs.*.full_scrub_ratio=$ratio
}
full_scrub_threshold_rate() {
[[ $(lustre_version_code $SINGLEMDS) -le $(version_code 2.6.50) ]] &&
return
local rate=$1
do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param -n \
osd-ldiskfs.*.full_scrub_threshold_rate=$rate
}
test_0() {
scrub_prep 0
echo "starting MDTs without disabling OI scrub"
scrub_start_mds 1 "$MOUNT_OPTS_SCRUB"
scrub_check_status 2 init
scrub_check_flags 3 ""
mount_client $MOUNT || error "(4) Fail to start client!"
scrub_check_data 5
}
run_test 0 "Do not auto trigger OI scrub for non-backup/restore case"
test_1a() {
scrub_prep 0
echo "start $SINGLEMDS without disabling OI scrub"
scrub_start_mds 1 "$MOUNT_OPTS_SCRUB"
local FLAGS=$($SHOW_SCRUB | awk '/^flags/ { print $2 }')
[ -z "$FLAGS" ] || error "(3) Expect empty flags, but got '$FLAGS'"
mount_client $MOUNT || error "(4) Fail to start client!"
#define OBD_FAIL_OSD_FID_MAPPING 0x193
do_facet $SINGLEMDS $LCTL set_param fail_loc=0x193
# update .lustre OI mapping
touch $MOUNT/.lustre
do_facet $SINGLEMDS $LCTL set_param fail_loc=0
umount_client $MOUNT || error "(5) Fail to stop client!"
echo "stop $SINGLEMDS"
stop $SINGLEMDS > /dev/null || error "(6) Fail to stop MDS!"
echo "start $SINGLEMDS with disabling OI scrub"
start $SINGLEMDS $MDT_DEVNAME $MOUNT_OPTS_NOSCRUB > /dev/null ||
error "(7) Fail to start MDS!"
local FLAGS=$($SHOW_SCRUB | awk '/^flags/ { print $2 }')
[ "$FLAGS" == "inconsistent" ] ||
error "(9) Expect 'inconsistent', but got '$FLAGS'"
}
run_test 1a "Auto trigger initial OI scrub when server mounts"
test_1b() {
scrub_prep 0
scrub_remove_ois 1
echo "start MDTs without disabling OI scrub"
scrub_start_mds 2 "$MOUNT_OPTS_SCRUB"
scrub_check_status 3 completed
mount_client $MOUNT || error "(4) Fail to start client!"
scrub_check_data 5
}
run_test 1b "Trigger OI scrub when MDT mounts for OI files remove/recreate case"
test_1c() {
local index
# OI files to be removed:
# idx 0: oi.16.0
# idx 2: oi.16.{2,4,8,16,32}
# idx 3: oi.16.{3,9,27}
for index in 0 2 3; do
scrub_prep 0
scrub_remove_ois 1 $index
echo "start MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 3 recreated
scrub_start 4
scrub_check_status 5 completed
scrub_check_flags 6 ""
done
}
run_test 1c "Auto detect kinds of OI file(s) removed/recreated cases"
test_2() {
scrub_prep 0
scrub_backup_restore 1
echo "starting MDTs without disabling OI scrub"
scrub_start_mds 2 "$MOUNT_OPTS_SCRUB"
scrub_check_status 3 completed
mount_client $MOUNT || error "(4) Fail to start client!"
scrub_check_data 5
}
run_test 2 "Trigger OI scrub when MDT mounts for backup/restore case"
# test_3 is obsolete, it will be covered by test_5.
test_3() {
formatall > /dev/null
setupall > /dev/null
scrub_prep 0
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_status 3 init
scrub_check_flags 4 inconsistent
}
#run_test 3 "Do not trigger OI scrub when MDT mounts if 'noscrub' specified"
test_4a() {
scrub_prep 0
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
mount_client $MOUNT || error "(5) Fail to start client!"
scrub_enable_auto
full_scrub_ratio 0
scrub_check_data 6
sleep 3
scrub_check_status 7 completed
scrub_check_flags 8 ""
local -a updated0
for n in $(seq $MDSCOUNT); do
updated0[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
done
scrub_check_data2 sanity-scrub.sh 9
sleep 3
local -a updated1
for n in $(seq $MDSCOUNT); do
updated1[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
[ ${updated0[$n]} -eq ${updated1[$n]} ] ||
error "(10) NOT auto trigger full scrub as expected"
done
}
run_test 4a "Auto trigger OI scrub if bad OI mapping was found (1)"
test_4b() {
scrub_prep 5
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
mount_client $MOUNT || error "(5) Fail to start client!"
scrub_enable_auto
full_scrub_ratio 10
full_scrub_threshold_rate 10000
scrub_check_data 6
sleep 3
scrub_check_status 7 completed
scrub_check_flags 8 ""
local -a updated0
for n in $(seq $MDSCOUNT); do
updated0[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
done
scrub_check_data2 sanity-scrub.sh 9
sleep 3
scrub_check_status 10 completed
scrub_check_flags 11 ""
local -a updated1
for n in $(seq $MDSCOUNT); do
updated1[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
[ ${updated0[$n]} -lt ${updated1[$n]} ] ||
error "(12) Auto trigger full scrub unexpectedly"
done
for n in $(seq $MDSCOUNT); do
ls -l $DIR/$tdir/mds$n/*.sh > /dev/null ||
error "(13) fail to ls"
done
sleep 3
scrub_check_status 14 completed
scrub_check_flags 15 ""
for n in $(seq $MDSCOUNT); do
updated0[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
[ ${updated0[$n]} -gt ${updated1[$n]} ] ||
error "(16) Auto trigger full scrub unexpectedly"
done
for n in $(seq $MDSCOUNT); do
ls -l $DIR/$tdir/mds$n/d_${tfile}/ || error "(17) fail to ls"
done
sleep 3
for n in $(seq $MDSCOUNT); do
updated1[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
[ ${updated0[$n]} -eq ${updated1[$n]} ] ||
error "(18) NOT auto trigger full scrub as expected"
done
}
run_test 4b "Auto trigger OI scrub if bad OI mapping was found (2)"
test_4c() {
scrub_prep 500
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
mount_client $MOUNT || error "(5) Fail to start client!"
scrub_enable_auto
full_scrub_ratio 2
full_scrub_threshold_rate 20
scrub_check_data 6
sleep 3
scrub_check_status 7 completed
scrub_check_flags 8 ""
local -a updated0
for n in $(seq $MDSCOUNT); do
updated0[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
done
scrub_check_data2 sanity-scrub.sh 9
sleep 3
scrub_check_status 10 completed
scrub_check_flags 11 ""
local -a updated1
for n in $(seq $MDSCOUNT); do
updated1[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
[ ${updated0[$n]} -lt ${updated1[$n]} ] ||
error "(12) Auto trigger full scrub unexpectedly"
done
for n in $(seq $MDSCOUNT); do
ls -l $DIR/$tdir/mds$n/*.sh > /dev/null ||
error "(13) fail to ls"
done
sleep 3
scrub_check_status 14 completed
scrub_check_flags 15 ""
for n in $(seq $MDSCOUNT); do
updated0[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
[ ${updated0[$n]} -gt ${updated1[$n]} ] ||
error "(16) Auto trigger full scrub unexpectedly"
done
for n in $(seq $MDSCOUNT); do
ls -l $DIR/$tdir/mds$n/${tfile}1 || error "(17) fail to ls"
done
sleep 3
for n in $(seq $MDSCOUNT); do
updated1[$n]=$(scrub_status $n |
awk '/^sf_items_updated_prior/ { print $2 }')
[ ${updated0[$n]} -eq ${updated1[$n]} ] ||
error "(18) NOT auto trigger full scrub as expected"
done
}
run_test 4c "Auto trigger OI scrub if bad OI mapping was found (3)"
test_5() {
formatall > /dev/null
setupall > /dev/null
scrub_prep 1000
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled (1)"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_status 3 init
scrub_check_flags 4 inconsistent
mount_client $MOUNT || error "(5) Fail to start client!"
scrub_enable_auto
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=3 fail_loc=0x190
full_scrub_ratio 0
scrub_check_data 6
umount_client $MOUNT || error "(7) Fail to stop client!"
scrub_check_status 8 scanning
#define OBD_FAIL_OSD_SCRUB_CRASH 0x191
do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param fail_loc=0x191
sleep 4
scrub_stop_mds 9
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_loc=0 fail_val=0
echo "starting MDTs with OI scrub disabled (2)"
scrub_start_mds 10 "$MOUNT_OPTS_NOSCRUB"
scrub_check_status 11 crashed
scrub_stop_mds 12
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=3 fail_loc=0x190
echo "starting MDTs without disabling OI scrub"
scrub_start_mds 13 "$MOUNT_OPTS_SCRUB"
scrub_check_status 14 scanning
#define OBD_FAIL_OSD_SCRUB_FATAL 0x192
do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param fail_loc=0x192
scrub_check_status 15 failed
mount_client $MOUNT || error "(16) Fail to start client!"
full_scrub_ratio 0
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=3 fail_loc=0x190
local n
for n in $(seq $MDSCOUNT); do
stat $DIR/$tdir/mds$n/${tfile}800 ||
error "(17) Failed to stat mds$n/${tfile}800"
done
scrub_check_status 18 scanning
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_loc=0 fail_val=0
scrub_check_status 19 completed
scrub_check_flags 20 ""
}
run_test 5 "OI scrub state machine"
test_6() {
scrub_prep 1000
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
mount_client $MOUNT || error "(5) Fail to start client!"
scrub_enable_auto
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=2 fail_loc=0x190
full_scrub_ratio 0
scrub_check_data 6
# Sleep 5 sec to guarantee at least one object processed by OI scrub
sleep 5
# Fail the OI scrub to guarantee there is at least one checkpoint
#define OBD_FAIL_OSD_SCRUB_FATAL 0x192
do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param fail_loc=0x192
scrub_check_status 7 failed
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=3 fail_loc=0x190
local n
for n in $(seq $MDSCOUNT); do
# stat will re-trigger OI scrub
stat $DIR/$tdir/mds$n/${tfile}800 ||
error "(8) Failed to stat mds$n/${tfile}800"
done
umount_client $MOUNT || error "(9) Fail to stop client!"
scrub_check_status 10 scanning
#define OBD_FAIL_OSD_SCRUB_CRASH 0x191
do_nodes $(comma_list $(mdts_nodes)) $LCTL set_param fail_loc=0x191
sleep 4
local -a position0
for n in $(seq $MDSCOUNT); do
position0[$n]=$(scrub_status $n |
awk '/^last_checkpoint_position/ {print $2}')
position0[$n]=$((${position0[$n]} + 1))
done
scrub_stop_mds 11
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=3 fail_loc=0x190
echo "starting MDTs without disabling OI scrub"
scrub_start_mds 12 "$MOUNT_OPTS_SCRUB"
scrub_check_status 13 scanning
local -a position1
for n in $(seq $MDSCOUNT); do
position1[$n]=$(scrub_status $n |
awk '/^latest_start_position/ {print $2}')
if [ ${position0[$n]} -ne ${position1[$n]} ]; then
error "(14) Expected position ${position0[$n]}, but" \
"got ${position1[$n]}"
fi
done
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_loc=0 fail_val=0
scrub_check_status 15 completed
scrub_check_flags 16 ""
}
run_test 6 "OI scrub resumes from last checkpoint"
test_7() {
scrub_prep 500
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
mount_client $MOUNT || error "(5) Fail to start client!"
scrub_enable_auto
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=3 fail_loc=0x190
full_scrub_ratio 0
scrub_check_data 6
local n
for n in $(seq $MDSCOUNT); do
stat $DIR/$tdir/mds$n/${tfile}300 ||
error "(7) Failed to stat mds$n/${tfile}300!"
done
scrub_check_status 8 scanning
scrub_check_flags 9 inconsistent,auto
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_loc=0 fail_val=0
scrub_check_status 10 completed
scrub_check_flags ""
}
run_test 7 "System is available during OI scrub scanning"
test_8() {
scrub_prep 128
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=1 fail_loc=0x190
scrub_start 5
scrub_check_status 6 scanning
scrub_stop 7
scrub_check_status 8 stopped
scrub_start 9
scrub_check_status 10 scanning
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_loc=0 fail_val=0
scrub_check_status 11 completed
scrub_check_flags 12 ""
}
run_test 8 "Control OI scrub manually"
test_9() {
if [ -z "$(grep "processor.*: 1" /proc/cpuinfo)" ]; then
skip "Testing on UP system, the speed may be inaccurate."
return 0
fi
scrub_prep 6000
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
local BASE_SPEED1=100
local RUN_TIME1=10
# OI scrub should run with full speed under inconsistent case
scrub_start 5 -s $BASE_SPEED1
sleep $RUN_TIME1
scrub_check_status 6 completed
scrub_check_flags 7 ""
# OI scrub should run with limited speed under non-inconsistent case
scrub_start 8 -s $BASE_SPEED1 -r
sleep $RUN_TIME1
scrub_check_status 9 scanning
	# Do not forget the 1024 pre-fetched items, and allow for timing
	# error, which should normally be less than 2 seconds. We allow
	# another 20% scheduling error on top of that.
local PRE_FETCHED=1024
local TIME_DIFF=2
# MAX_MARGIN = 1.2 = 12 / 10
local MAX_SPEED=$(((PRE_FETCHED + BASE_SPEED1 * \
(RUN_TIME1 + TIME_DIFF)) / RUN_TIME1 * 12 / 10))
local n
for n in $(seq $MDSCOUNT); do
local SPEED=$(scrub_status $n | \
awk '/^average_speed/ { print $2 }')
[ $SPEED -lt $MAX_SPEED ] ||
error "(10) Got speed $SPEED, expected less than" \
"$MAX_SPEED"
done
# adjust speed limit
local BASE_SPEED2=300
local RUN_TIME2=10
for n in $(seq $MDSCOUNT); do
do_facet mds$n $LCTL set_param -n \
mdd.$(facet_svc mds$n).lfsck_speed_limit $BASE_SPEED2
done
sleep $RUN_TIME2
# MIN_MARGIN = 0.8 = 8 / 10
local MIN_SPEED=$(((PRE_FETCHED + \
BASE_SPEED1 * (RUN_TIME1 - TIME_DIFF) + \
BASE_SPEED2 * (RUN_TIME2 - TIME_DIFF)) / \
(RUN_TIME1 + RUN_TIME2) * 8 / 10))
# MAX_MARGIN = 1.2 = 12 / 10
MAX_SPEED=$(((PRE_FETCHED + \
BASE_SPEED1 * (RUN_TIME1 + TIME_DIFF) + \
BASE_SPEED2 * (RUN_TIME2 + TIME_DIFF)) / \
(RUN_TIME1 + RUN_TIME2) * 12 / 10))
for n in $(seq $MDSCOUNT); do
SPEED=$(scrub_status $n | awk '/^average_speed/ { print $2 }')
[ $SPEED -gt $MIN_SPEED ] ||
error "(11) Got speed $SPEED, expected more than" \
"$MIN_SPEED"
[ $SPEED -lt $MAX_SPEED ] ||
error "(12) Got speed $SPEED, expected less than" \
"$MAX_SPEED"
do_facet mds$n $LCTL set_param -n \
mdd.$(facet_svc mds$n).lfsck_speed_limit 0
done
scrub_check_status 13 completed
}
run_test 9 "OI scrub speed control"
test_10a() {
scrub_prep 0
scrub_backup_restore 1
echo "starting mds$n with OI scrub disabled (1)"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
mount_client $MOUNT || error "(5) Fail to start client!"
scrub_enable_auto
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=1 fail_loc=0x190
full_scrub_ratio 0
scrub_check_data 6
scrub_check_status 7 scanning
umount_client $MOUNT || error "(8) Fail to stop client!"
scrub_stop_mds 9
echo "starting MDTs with OI scrub disabled (2)"
scrub_start_mds 10 "$MOUNT_OPTS_NOSCRUB"
scrub_check_status 11 paused
scrub_stop_mds 12
echo "starting MDTs without disabling OI scrub"
scrub_start_mds 13 "$MOUNT_OPTS_SCRUB"
scrub_check_status 14 scanning
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_loc=0 fail_val=0
scrub_check_status 15 completed
scrub_check_flags 16 ""
}
run_test 10a "non-stopped OI scrub should auto restarts after MDS remount (1)"
# test_10b is obsolete, it will be covered by related sanity-lfsck tests.
test_10b() {
scrub_prep 0
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_flags 4 inconsistent
#define OBD_FAIL_OSD_SCRUB_DELAY 0x190
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_val=3 fail_loc=0x190
scrub_start 5
scrub_check_status 6 scanning
scrub_stop_mds 7
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 8 "$MOUNT_OPTS_NOSCRUB"
scrub_check_status 9 paused
scrub_stop_mds 10
echo "starting MDTs without disabling OI scrub"
scrub_start_mds 11 "$MOUNT_OPTS_SCRUB"
scrub_check_status 12 scanning
do_nodes $(comma_list $(mdts_nodes)) \
$LCTL set_param fail_loc=0 fail_val=0
scrub_check_status 13 completed
scrub_check_flags 14 ""
}
#run_test 10b "non-stopped OI scrub should auto restarts after MDS remount (2)"
test_11() {
local CREATED=100
local n
check_mount_and_prep
for n in $(seq $MDSCOUNT); do
$LFS mkdir -i $((n - 1)) $DIR/$tdir/mds$n ||
error "(1) Fail to mkdir $DIR/$tdir/mds$n"
createmany -o $DIR/$tdir/mds$n/f $CREATED ||
error "(2) Fail to create under $tdir/mds$n"
done
# reset OI scrub start point by force
scrub_start 3 -r
scrub_check_status 4 completed
declare -a checked0
declare -a checked1
# OI scrub should skip the new created objects for the first accessing
# notice we're creating a new llog for every OST on every startup
# new features can make this even less stable, so we only check that
	# the number of skipped files is more than the number of known created
local MINIMUM=$((CREATED + 1)) # files + directory
for n in $(seq $MDSCOUNT); do
local SKIPPED=$(scrub_status $n | awk '/^noscrub/ { print $2 }')
[ $SKIPPED -lt $MINIMUM ] &&
error "(5) Expect at least $MINIMUM objects" \
"skipped on mds$n, but got $SKIPPED"
checked0[$n]=$(scrub_status $n | awk '/^checked/ { print $2 }')
done
# reset OI scrub start point by force
scrub_start 6 -r
scrub_check_status 7 completed
# OI scrub should skip the new created object only once
for n in $(seq $MDSCOUNT); do
SKIPPED=$(scrub_status $n | awk '/^noscrub/ { print $2 }')
checked1[$n]=$(scrub_status $n | awk '/^checked/ { print $2 }')
[ ${checked0[$n]} -ne ${checked1[$n]} -o $SKIPPED -eq 0 ] ||
error "(8) Expect 0 objects skipped on mds$n, but" \
"got $SKIPPED"
done
}
run_test 11 "OI scrub skips the new created objects only once"
test_12() {
check_mount_and_prep
$SETSTRIPE -c 1 -i 0 $DIR/$tdir
#define OBD_FAIL_OSD_COMPAT_INVALID_ENTRY 0x195
do_facet ost1 $LCTL set_param fail_loc=0x195
local count=$(precreated_ost_obj_count 0 0)
createmany -o $DIR/$tdir/f $((count + 32))
umount_client $MOUNT || error "(1) Fail to stop client!"
stop ost1 || error "(2) Fail to stop ost1"
#define OBD_FAIL_OST_NODESTROY 0x233
do_facet ost1 $LCTL set_param fail_loc=0x233
start ost1 $(ostdevname 1) $MOUNT_OPTS_NOSCRUB ||
error "(3) Fail to start ost1"
mount_client $MOUNT || error "(4) Fail to start client!"
ls -ail $DIR/$tdir > /dev/null 2>&1 && error "(5) ls should fail"
$START_SCRUB_ON_OST -r || error "(6) Fail to start OI scrub on OST!"
do_facet ost1 $LCTL set_param fail_loc=0
wait_update_facet ost1 "$LCTL get_param -n \
osd-ldiskfs.$(facet_svc ost1).oi_scrub |
awk '/^status/ { print \\\$2 }'" "completed" 6 ||
error "(7) Expected '$expected' on ost1"
ls -ail $DIR/$tdir > /dev/null || {
$SHOW_SCRUB_ON_OST
error "(8) ls should succeed"
}
}
run_test 12 "OI scrub can rebuild invalid /O entries"
test_13() {
check_mount_and_prep
$SETSTRIPE -c 1 -i 0 $DIR/$tdir
#define OBD_FAIL_OSD_COMPAT_NO_ENTRY 0x196
do_facet ost1 $LCTL set_param fail_loc=0x196
local count=$(precreated_ost_obj_count 0 0)
createmany -o $DIR/$tdir/f $((count + 32))
do_facet ost1 $LCTL set_param fail_loc=0
umount_client $MOUNT || error "(1) Fail to stop client!"
stop ost1 || error "(2) Fail to stop ost1"
start ost1 $(ostdevname 1) $MOUNT_OPTS_NOSCRUB ||
error "(3) Fail to start ost1"
mount_client $MOUNT || error "(4) Fail to start client!"
ls -ail $DIR/$tdir > /dev/null 2>&1 && error "(5) ls should fail"
$START_SCRUB_ON_OST -r || error "(6) Fail to start OI scrub on OST!"
wait_update_facet ost1 "$LCTL get_param -n \
osd-ldiskfs.$(facet_svc ost1).oi_scrub |
awk '/^status/ { print \\\$2 }'" "completed" 6 ||
error "(7) Expected '$expected' on ost1"
ls -ail $DIR/$tdir > /dev/null || error "(8) ls should succeed"
}
run_test 13 "OI scrub can rebuild missed /O entries"
test_14() {
check_mount_and_prep
$SETSTRIPE -c 1 -i 0 $DIR/$tdir
#define OBD_FAIL_OSD_COMPAT_NO_ENTRY 0x196
do_facet ost1 $LCTL set_param fail_loc=0x196
local count=$(precreated_ost_obj_count 0 0)
createmany -o $DIR/$tdir/f $((count + 32))
do_facet ost1 $LCTL set_param fail_loc=0
umount_client $MOUNT || error "(1) Fail to stop client!"
stop ost1 || error "(2) Fail to stop ost1"
echo "run e2fsck"
run_e2fsck $(facet_host ost1) $(ostdevname 1) "-y" ||
error "(3) Fail to run e2fsck error"
start ost1 $(ostdevname 1) $OST_MOUNT_OPTS ||
error "(4) Fail to start ost1"
mount_client $MOUNT || error "(5) Fail to start client!"
local LF_REPAIRED=$($SHOW_SCRUB_ON_OST |
awk '/^lf_repa[ri]*ed/ { print $2 }')
[ $LF_REPAIRED -gt 0 ] ||
error "(6) Some entry under /lost+found should be repaired"
ls -ail $DIR/$tdir > /dev/null || error "(7) ls should succeed"
}
run_test 14 "OI scrub can repair objects under lost+found"
test_15() {
# skip test_15 for LU-4182
[ $MDSCOUNT -ge 2 ] && skip "skip now for >= 2 MDTs" && return
local server_version=$(lustre_version_code $SINGLEMDS)
scrub_prep 20
scrub_backup_restore 1
echo "starting MDTs with OI scrub disabled"
scrub_start_mds 2 "$MOUNT_OPTS_NOSCRUB"
scrub_check_status 3 init
scrub_check_flags 4 inconsistent
# run under dryrun mode
if [ $server_version -lt $(version_code 2.5.58) ]; then
scrub_start 5 --dryrun on
else
scrub_start 5 --dryrun
fi
scrub_check_status 6 completed
scrub_check_flags 7 inconsistent
scrub_check_params 8 dryrun
scrub_check_repaired 9 20
# run under dryrun mode again
if [ $server_version -lt $(version_code 2.5.58) ]; then
scrub_start 10 --dryrun on
else
scrub_start 10 --dryrun
fi
scrub_check_status 11 completed
scrub_check_flags 12 inconsistent
scrub_check_params 13 dryrun
scrub_check_repaired 14 20
# run under normal mode
#
# Lustre-2.x (x <= 5) used "-n off" to disable dryrun which does not
# work under Lustre-2.y (y >= 6), the test script should be fixed as
# "-noff" or "--dryrun=off" or nothing by default.
if [ $server_version -lt $(version_code 2.5.58) ]; then
scrub_start 15 --dryrun off
else
scrub_start 15
fi
scrub_check_status 16 completed
scrub_check_flags 17 ""
scrub_check_params 18 ""
scrub_check_repaired 19 20
# run under normal mode again
if [ $server_version -lt $(version_code 2.5.58) ]; then
scrub_start 20 --dryrun off
else
scrub_start 20
fi
scrub_check_status 21 completed
scrub_check_flags 22 ""
scrub_check_params 23 ""
scrub_check_repaired 24 0
}
run_test 15 "Dryrun mode OI scrub"
# restore MDS/OST size
MDSSIZE=${SAVED_MDSSIZE}
OSTSIZE=${SAVED_OSTSIZE}
OSTCOUNT=${SAVED_OSTCOUNT}
# cleanup the system at last
formatall
complete $SECONDS
exit_status
|
HPCStack/lustre-release
|
lustre/tests/sanity-scrub.sh
|
Shell
|
gpl-2.0
| 30,976 |
#!/bin/sh
# see if we are in the top of the tree
if [ ! -f configure.proto ]; then
cd ../..
if [ ! -f configure.proto ]; then
echo "please run this script from the base of the VICE directory"
echo "or from the appropriate build directory"
exit 1
fi
fi
curdir=`pwd`
CC=owcc CXX=owcc RANLIB=true STRIP=wstrip AR="/bin/sh $curdir/src/arch/win32/watcom/watcom-ar.sh" ./configure -v --host=i686-pc-mingw32 --enable-native-tools=owcc --disable-dependency-tracking
make
make bindist
|
AreaScout/vice-gles2
|
build/openwatcom/win32-build.sh
|
Shell
|
gpl-2.0
| 497 |
#!/bin/bash
### InterGenOS setup.sh - Put sources and variables in place to build the Temporary System
### Written by Christopher 'InterGen' Cork <[email protected]>
### 2/19/2015
#############################################################################################
##****!!!! Please note that the user 'igos' is being given the pass 'intergenos' !!!!****##
#############################################################################################
### Make sure setup.sh is being run as user 'root'
if [[ $EUID -ne 0 ]]; then
echo " "
echo " "
echo "WARNING ****************************************"
echo " "
echo " setup.sh must be run as user 'root'. Exiting." 1>&2
echo " "
echo "WARNING ****************************************"
echo " "
echo " "
exit 1
fi
### Color variables
red="$(echo -e "\033[0;31m")"
green="$(echo -e "\033[0;32m")"
lblue="$(echo -e "\033[1;34m")"
NC="$(echo -e "\033[0m")"
### Build target mount point - used below before the igos user's .bashrc
### (which exports the same path) is created
IGos=/mnt/igos
### Python heading
function print_heading {
python - <<END
### Python variables
import time
import sys
### Python functions
def delay_print(s):
for c in s:
sys.stdout.write( '%s' % c )
sys.stdout.flush()
time.sleep(0.003)
def delay_print2(s):
for c in s:
sys.stdout.write( '%s' % c )
sys.stdout.flush()
time.sleep(0.4)
print(" ")
print(" ")
delay_print('${lblue}*************************************************************${NC}')
print(" ")
delay_print("${lblue}| |${NC}")
print(" ")
delay_print("${lblue}|${NC} ${green}Welcome to ${NC}Inter${lblue}Gen${NC}OS ${green}Initial Setup${NC} ${lblue}|${NC}")
print(" ")
delay_print("${lblue}| |${NC}")
print(" ")
delay_print("${lblue}|${NC} ${green}Build${NC}_${green}001${NC} ${lblue}|${NC}")
print(" ")
delay_print("${lblue}| |${NC}")
print(" ")
delay_print("${lblue}*************************************************************${NC}")
print(" ")
END
}
clear
print_heading
echo " "
echo " "
echo " "
echo " "
echo " "
echo "${green}Please enter the ${NC}Partition ID ${green}where you'd like to build ${NC}Inter${lblue}Gen${NC}OS"
echo " "
read -p "${green}Set it up for you in /dev/${NC}?${green}:${NC} "
echo " "
echo "${green}Ok, target build ${NC}Partition ID ${green}is /dev/${NC}$REPLY"
echo " "
read -p "${green}Ready to begin${NC}? [y/n] ${green}:${NC} " opt
if [ $opt = y ]; then
echo " "
echo -e "${green}Thank you, the build will now proceed${NC}"
sleep 1
echo " "
echo -e "${green}You will be prompted for manual entries when needed.${NC}"
sleep 1
echo " "
echo -e "${green}We appreciate your participation in the InterGenOS project.${NC}"
sleep 1
echo " "
echo " "
mkdir -pv $IGos
mount -v -t ext4 /dev/$REPLY $IGos
echo "export IGosPart=/dev/$REPLY" >> ~/.bash_profile
else
echo " "
echo -e "${red}Oh,${NC}"
sleep 1
echo " "
echo -e "${red}snap,${NC}"
sleep 1
echo " "
echo -e "${green}Well, then let's make sure we've got your ${NC}Build Partition ID ${green}set right, ok?${NC}"
echo " "
read -p "${green}Which ${NC}Partition ID ${green}did you want it set up on- /dev/${NC}?${green}:${NC} " REPLY2
echo " "
echo " "
echo -e "${green}Alright, target build ${NC}Partition ID ${green}is set to /dev/${NC}$REPLY2"
echo " "
read -p "${green}Ready to begin${NC}? [y/n] ${green}:${NC} " opt2
if [ $opt2 = y ]; then
echo " "
echo -e "${green}Thank you, the build will now proceed${NC}"
sleep 1
echo " "
echo -e "${green}You will be prompted for manual entries when needed.${NC}"
sleep 1
echo " "
echo -e "${green}We appreciate your participation in the InterGenOS project.${NC}"
sleep 1
echo " "
echo " "
mkdir -pv $IGos
mount -v -t ext4 /dev/$REPLY2 $IGos
echo "export IGosPart=/dev/$REPLY2" >> ~/.bash_profile
else
echo " "
echo -e "${red}Oh,${NC}"
sleep 1
echo " "
echo -e "${red}snap,${NC}"
sleep 1
echo " "
echo -e "${NC}Hmmm...${lblue}Ok, you should double check your ${NC}Build Partition ID ${lblue}and then run the ${NC}Initial Setup ${lblue}again.${NC}"
sleep 1
echo " "
echo -e "${green}We'll hang out right here 'till you get back. ${NC}:) "
sleep 1
echo " "
exit 0
fi
fi
### Create and set permissions on source directory
mkdir -v $IGos/sources
chmod -v a+wt $IGos/sources
### Get and unpack sources
wget http://intergenstudios.com/Downloads/InterGenOS/build001/Core/build001_Core_Sources.tar.gz &&
tar xf build001_Core_Sources.tar.gz &&
mv intergen_os_sources/* $IGos/sources/ &&
rm -rf intergen_os_sources build001_Core_Sources.tar.gz &&
### Create and link tools directory
mkdir -v $IGos/tools
ln -sv $IGos/tools /
### Create the 'igos' group and user
groupadd igos
useradd -s /bin/bash -g igos -m -k /dev/null igos
echo "igos:intergenos" | chpasswd &&
### Change ownership of tools and sources directories to user 'igos'
chown -v igos $IGos/tools
chown -v igos $IGos/sources
### Download Temp System build script, set ownership, and make it executable
wget https://raw.githubusercontent.com/InterGenOS/build_001/master/build_temp_sys.sh -P $IGos
chmod +x $IGos/build_temp_sys.sh
chown -v igos $IGos/build_temp_sys.sh
### Set .bash_profile and .bashrc for user 'igos'
cat > /home/igos/.bash_profile << "EOF"
exec env -i HOME=$HOME TERM=$TERM PS1='\u:\w\$ ' /bin/bash
EOF
cat > /home/igos/.bashrc << "EOF"
set +h
umask 022
IGos=/mnt/igos
LC_ALL=POSIX
IGos_TGT=$(uname -m)-igos-linux-gnu
PATH=/tools/bin:/bin:/usr/bin
export IGos LC_ALL IGos_TGT PATH
cd $IGos
./build_temp_sys.sh
EOF
function clearLine() {
tput cuu 1 && tput el
}
echo " "
echo " "
echo "=========================================="
echo "| |"
echo "| Switching to shell for user |"
echo "| 'igos' |"
echo "| in 5 seconds |"
echo "| |"
echo "| Preparing for build_temp_sys.sh |"
echo "| |"
echo "| This may take awhile... |"
echo "| |"
echo "=========================================="
echo " "
function SleepTimer() {
Count=5
while [ $Count -gt 0 ]; do
echo Starting in: $Count
sleep 1
clearLine
let Count=Count-1
done
}
SleepTimer
clearLine
echo " "
echo " "
echo " "
echo "Reticulating Splines - Switching shells..."
sleep 2
clearLine
echo "Go grab yourself a stimulating beverage..."
sleep 1
echo "This will take a little while..."
echo " "
echo " "
echo " "
sleep 2
su - igos
|
InterGenOS/build_001---deprecated
|
setup.sh
|
Shell
|
gpl-2.0
| 7,020 |
#!/bin/sh
# create cjw_newsletter build package
# tar.gz package
EXTENSION_NAME=cjw_newsletter
CURRENT_DIR=`pwd`
cd ..
cd ..
cd ..
EZROOT=`pwd`
echo '---------------------------------------------------'
echo "START Build:" $EXTENSION_NAME
echo '---------------------------------------------------'
cd $EZROOT
# CURRENT_TIMESTAMP=`date +%s`
# echo "$CURRENT_TIMESTAMP"
php ./extension/cjw_extensiontools/bin/php/build.php -d extension/$EXTENSION_NAME
echo '---------------------------------------------------'
echo "END Build:" $EXTENSION_NAME
echo '---------------------------------------------------'
cd $CURRENT_DIR
|
styleflashernewmedia/cjw_newsletter
|
_build/build_extension.sh
|
Shell
|
gpl-2.0
| 634 |
time sh demo.sh
|
esoule/crosstool
|
time.sh
|
Shell
|
gpl-2.0
| 16 |
#!/bin/sh
# this can be called from another script as ./q3hostfw <server-id> <port-number>
NAME=${1:-LIMITER} # you need to specify unique table name for each port
PORT=${2:-27960} # and unique server port as well
RATE=768/second
BURST=128
# flush INPUT table:
#iptables -F INPUT
# insert our rule at the beginning of the INPUT chain:
iptables -I INPUT \
-p udp --dport $PORT -m hashlimit \
--hashlimit-mode srcip \
--hashlimit-above $RATE \
--hashlimit-burst $BURST \
--hashlimit-name $NAME \
-j DROP
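# hypothetical usage (not part of this file): protect two servers by calling
# the script once per port with unique hashlimit table names, e.g.
#   sh fw-q3host.sh Q3A 27960
#   sh fw-q3host.sh Q3B 27961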
|
Q3HQ/Quake3hq
|
docs/firewall/fw-q3host.sh
|
Shell
|
gpl-2.0
| 532 |
#!/bin/sh
#
# mailer-t1.sh: test #1 for the mailer.py script
#
# This test generates "email" for each revision in the repository,
# concatenating them into one big blob, which is then compared against
# a known output.
#
# Note: mailer-tweak.py must have been run to make the test outputs
# consistent and reproducible
#
# USAGE: ./mailer-t1.sh REPOS MAILER-SCRIPT
#
if test "$#" != 2; then
echo "USAGE: ./mailer-t1.sh REPOS MAILER-SCRIPT"
exit 1
fi
scripts="`dirname $0`"
scripts="`cd $scripts && pwd`"
glom=$scripts/mailer-t1.current
orig=$scripts/mailer-t1.output
conf=$scripts/mailer.conf
rm -f $glom
export TZ=GST
youngest="`svnlook youngest $1`"
for rev in `python -c "print(\" \".join(map(str, range(1,$youngest+1))))"`; do
$2 commit $1 $rev $conf >> $glom
done
echo "current mailer.py output in: $glom"
dos2unix $glom
echo diff -q $orig $glom
diff -q $orig $glom && echo "SUCCESS: no differences detected"
|
bdmod/extreme-subversion
|
BinarySourcce/subversion-1.6.17/tools/hook-scripts/mailer/tests/mailer-t1.sh
|
Shell
|
gpl-2.0
| 938 |
#!/bin/sh
NODE_BIN=./node_modules/.bin
# Mocha (express app tests)
NODE_ENV=test $NODE_BIN/mocha --reporter spec app/tests
# Karma (angular app tests)
NODE_ENV=test $NODE_BIN/karma start
# Protractor (end-to-end tests)
NODE_ENV=test npm start &> /dev/null &
SERVER_PID=$!
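# crude readiness wait: give the backgrounded npm server a moment to bind
# before protractor starts hitting it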
sleep 5
$NODE_BIN/protractor
kill $SERVER_PID && echo "Killed npm server" || echo "Failed to kill server ($SERVER_PID)"
|
peap/rankcfb
|
test.sh
|
Shell
|
gpl-2.0
| 395 |
#! /bin/sh
# Copyright (C) 2007-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Test Automake TESTS color output, by forcing it.
# Keep this in sync with the sister test 'color-tests2.sh'.
required='grep-nonprint'
# For gen-testsuite-part: ==> try-with-serial-tests <==
. test-init.sh
# Escape '[' for grep, below.
red="$esc\[0;31m"
grn="$esc\[0;32m"
lgn="$esc\[1;32m"
blu="$esc\[1;34m"
mgn="$esc\[0;35m"
std="$esc\[m"
cat >>configure.ac <<END
AC_OUTPUT
END
cat >Makefile.am <<'END'
TESTS = $(check_SCRIPTS)
check_SCRIPTS = pass fail skip xpass xfail error
XFAIL_TESTS = xpass xfail
END
cat >pass <<END
#! /bin/sh
exit 0
END
cat >fail <<END
#! /bin/sh
exit 1
END
cat >skip <<END
#! /bin/sh
exit 77
END
cat >error <<END
#! /bin/sh
exit 99
END
cp fail xfail
cp pass xpass
chmod +x pass fail skip xpass xfail error
$ACLOCAL
$AUTOCONF
$AUTOMAKE --add-missing
test_color ()
{
# Not a useless use of cat; see above comments "grep-nonprinting"
# requirement in 'test-init.sh'.
cat stdout | grep "^${grn}PASS${std}: .*pass"
cat stdout | grep "^${red}FAIL${std}: .*fail"
cat stdout | grep "^${blu}SKIP${std}: .*skip"
cat stdout | grep "^${lgn}XFAIL${std}: .*xfail"
cat stdout | grep "^${red}XPASS${std}: .*xpass"
# The old serial testsuite driver doesn't distinguish between failures
# and hard errors.
if test x"$am_serial_tests" = x"yes"; then
cat stdout | grep "^${red}FAIL${std}: .*error"
else
cat stdout | grep "^${mgn}ERROR${std}: .*error"
fi
:
}
test_no_color ()
{
# With make implementations that, like Solaris make, in case of errors
# print the whole failing recipe on standard output, we should content
# ourselves with a laxer check, to avoid false positives.
# Keep this in sync with lib/am/check.am:$(am__color_tests).
if $FGREP '= Xalways; then' stdout; then
# Extra verbose make, resort to laxer checks.
# Note that we also want to check that the testsuite summary is
# not unduly colorized.
(
set +e # In case some grepped regex below isn't matched.
# Not a useless use of cat; see above comments "grep-nonprinting"
# requirement in 'test-init.sh'.
cat stdout | grep "TOTAL.*:"
cat stdout | grep "PASS.*:"
cat stdout | grep "FAIL.*:"
cat stdout | grep "SKIP.*:"
cat stdout | grep "XFAIL.*:"
cat stdout | grep "XPASS.*:"
cat stdout | grep "ERROR.*:"
cat stdout | grep 'test.*expected'
cat stdout | grep 'test.*not run'
cat stdout | grep '===='
cat stdout | grep '[Ss]ee .*test-suite\.log'
cat stdout | grep '[Tt]estsuite summary'
) | grep "$esc" && exit 1
: For shells with broken 'set -e'
else
cat stdout | grep "$esc" && exit 1
: For shells with broken 'set -e'
fi
}
for vpath in false :; do
if $vpath; then
mkdir build
cd build
srcdir=..
else
srcdir=.
fi
$srcdir/configure
# Forced colorization should take place also with non-ANSI terminals;
# hence the "TERM=dumb" definition.
AM_COLOR_TESTS=always; export AM_COLOR_TESTS
run_make -e FAIL -O TERM=dumb check
test_color
unset AM_COLOR_TESTS
run_make -e FAIL -O TERM=ansi check
test_no_color
$MAKE distclean
cd $srcdir
done
:
|
komh/automake-os2
|
t/color-tests.sh
|
Shell
|
gpl-2.0
| 3,833 |
#!/bin/bash
if [[ $(dpkg-query -W -f='${Status}\n' python) != 'install ok installed' ]]; then
apt-get install -y --force-yes python
fi
if [[ $(dpkg-query -W -f='${Status}\n' python-pip) != 'install ok installed' ]]; then
apt-get install -y --force-yes python-pip
fi
# Initialize the GAE submodules
git submodule init && \
git submodule sync && \
git submodule update
# Deploy the application
GoogleAppEngineSDK/appcfg.py --oauth2 --oauth2_credential_file=secrets/.appcfg_oauth2_tokens update src/
|
juanfranrv/iDronDataAnalyzer
|
deployToGAE.sh
|
Shell
|
gpl-2.0
| 510 |
#!/bin/bash
# This is a simple script to search a directory for TIFF images and convert them to PDF.
# It relies on find, tiff2pdf, and egrep to work its magic.
# Files ending in '.tiff' or '.tif' (in any case) are supported.
# Found this script here: http://www.neowin.net/forum/topic/1143030-need-helping-making-tiff2pdf-command-recursive/
tiffs=$(find '.' -type f -regextype 'posix-egrep' -iregex '.*\.tif[f]{0,1}')
for tiff in $tiffs; do
echo $tiff | grep -qsiE '\.tif$'
if [ $? -eq 0 ]; then
extension_length=3
else
extension_length=4
fi
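    # strip the 3- or 4-character extension and append "pdf"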
pdf="${tiff:0:$((${#tiff}-$extension_length))}pdf"
echo "tiff2pdf -o \"$pdf\" \"$tiff\""
tiff2pdf -o "$pdf" "$tiff"
done
exit 0
|
opendatakosovo/useful-bash-scripts
|
tiffs2pdfs.sh
|
Shell
|
gpl-2.0
| 770 |
#!/bin/sh
# --- SDE-COPYRIGHT-NOTE-BEGIN ---
# This copyright note is auto-generated by ./scripts/Create-CopyPatch.
#
# Filename: lib/version.sh
# Copyright (C) 2010 The OpenSDE Project
#
# More information can be found in the files COPYING and README.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License. A copy of the
# GNU General Public License can be found in the file COPYING.
# --- SDE-COPYRIGHT-NOTE-END ---
# NAME
# version.sh - determine OpenSDE version information
#
# SYNOPSIS
# version.sh [-fp]
#
# DESCRIPTION
# The version.sh script determines OpenSDE version informations
# by first checking if dedicated VERSION files for the OpenSDE
# framework and the package tree do exist, for extracting the
# version strings out of the files. In the case a VERSION file
# does not exist or if it extracts an empty version string from
# the an existing VERSION file it will assign a default version.
#
# If the version strings of the framework and the package tree
# are not the same the returned version string will be composed
# out of framework and the package tree version strings delimited
# by an '-'. Otherwise it will only return the framework version
# string.
#
# OPTIONS
# -f
# only return the version of the OpenSDE framework
# -p
# only return the version of the OpenSDE package tree
#
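# EXAMPLE
#	a hypothetical run: with etc/VERSION containing "0.4" and
#	package/VERSION containing "0.4.1",
#
#	$ ./lib/version.sh        # prints "0.4-0.4.1"
#	$ ./lib/version.sh -f     # prints "0.4"
#	$ ./lib/version.sh -p     # prints "0.4.1"
#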
version_sh_usage() {
echo "Usage: $0 [-fp]" >&2
}
while [ $# -gt 0 ] ; do
case "$1" in
-f)
mode=fm ;;
-p)
mode=pkg ;;
-*) version_sh_usage
exit 1 ;;
*)
break ;;
esac
shift
done
[ -n "$SDEROOT" ] || SDEROOT='.'
for x in fmver:etc/VERSION pkgver:package/VERSION; do
k="${x%%:*}" f="${x#*:}"
v=$(head -n1 "$SDEROOT/$f" 2> /dev/null)
[ -n "$v" ] || v=master
eval "$k='$v'"
done
case "$mode" in
fm) echo "$fmver" ;;
pkg) echo "$pkgver" ;;
*) if [ "$fmver" = "$pkgver" ]; then
echo "$fmver"
else
echo "$fmver-$pkgver"
fi
;;
esac
|
OpenSDE/opensde-nopast
|
lib/version.sh
|
Shell
|
gpl-2.0
| 2,043 |
#!/bin/bash
source ./test_DATA.sh
source ./test_ffmpeg.sh
source ./test_MCTF.sh
source ./test_wget.sh
source ./test_Kakadu.sh
VIDEO=coastguard_352x288x30x420x300
GOPs=1 # Without including the first GOP (that has only a I image)
Y_DIM=288
X_DIM=352
FPS=30
MAX_Q_SCALE=46000
TRLs=6
# TRLs=1 GOP_SIZE=1
# TRLs=2 GOP_SIZE=2
# TRLs=3 GOP_SIZE=4
# TRLs=4 GOP_SIZE=8
# TRLs=5 GOP_SIZE=16
# TRLs=6 GOP_SIZE=32
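# in general: GOP_SIZE = 2^(TRLs-1)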
if [[ ! -e $DATA/$VIDEO.yuv ]]; then
current_dir=$PWD
cd $DATA
wget http://www.hpca.ual.es/~vruiz/videos/$VIDEO.avi
ffmpeg -i $VIDEO.avi $VIDEO.yuv
cd $current_dir
fi
set -x
DATA_DIR=data-${0##*/}
rm -rf $DATA_DIR
mkdir $DATA_DIR
cd $DATA_DIR
ln -s $DATA/$VIDEO.yuv low_0
Q_SCALE=$MAX_Q_SCALE
while [ $Q_SCALE -ge 43000 ]
do
../RD-MCJ2K.sh -v $VIDEO -g $GOPs -y $Y_DIM -x $X_DIM -f $FPS -q "$Q_SCALE,$[Q_SCALE-1000]" -t $TRLs >> RD-MCJ2K-$VIDEO.dat 2>> stderr
let Q_SCALE=Q_SCALE-500
done
set +x
|
vicente-gonzalez-ruiz/QSVC
|
trunk/tests/RD-MCJ2K-coastguard_352x288x30x420x300.sh
|
Shell
|
gpl-2.0
| 935 |
#!/bin/bash
function displayUsage
{
echo "Usage: ./encode_hls.sh <source file path> <file prefix of output HLS segments> <logfile> <hls url>"
exit
}
function write_status
{
echo "$(date +'%Y-%m-%d %H:%M:%S') STATUS $1" >> $2
}
# log an error message, record the failure status, and abort
# (the original calls error_exit without defining it; this body and its
# message/logfile/status-code signature are inferred from the call sites)
function error_exit
{
echo "$1" >> $2
write_status $3 $2
exit 1
}
if [[ -z "$4" ]]
then
displayUsage
fi
inputfile=$1
output_prefix=$2
logfile=$3
hlsurl=$4
STATUS_ENCODING_HLS=1
STATUS_ENCODING_HLS_DONE=2
STATUS_ENCODING_HLS_ERROR=3
STATUS_UNK_ERROR=4
currentdir="$(pwd)/"
echo "$(date +'%Y-%m-%d %H:%M:%S') CURRENTDIR $currentdir" >> $logfile
echo "$(date +'%Y-%m-%d %H:%M:%S') HLS 1-PASS ENCODE START" >> $logfile
write_status $STATUS_ENCODING_HLS $logfile
aspect=""
check_1280_720=$(/usr/local/bin/ffmpeg -i "$inputfile" 2>&1 | grep "1280x720")
if [[ -n "$check_1280_720" ]]
then
aspect="-s 720x406"
fi
check_960x540=$(/usr/local/bin/ffmpeg -i "$inputfile" 2>&1 | grep "960x540")
if [[ -n "$check_960x540" ]]
then
aspect="-s 720x406"
fi
echo "aspect=$aspect"
/usr/local/src/ffmpeg_2_0_1/ffmpeg-2.0.1/ffmpeg -deinterlace -vol 256 -y -i "$inputfile" -y -threads 4 -flags loop -vcodec libx264 -g 250 -b 640k -bt 640k -maxrate 1500k $aspect -bufsize 3000k -qmin 2 -qmax 51 -subq 5 -me_method umh -cmp 2 -subcmp 2 -mbd 2 -b_strategy 1 -bidir_refine 1 -partitions parti4x4+partp8x8+partp4x4+partb8x8 -profile baseline -level 13 -async 100 -acodec aac -strict experimental -ar 44100 -ac 2 -ab 128k raw.ts && \
echo "$(date +'%Y-%m-%d %H:%M:%S') HLS ENCODE SUCCESS" >> $logfile || error_exit "$(date +'%Y-%m-%d %H:%M:%S') HLS ENCODE FAILED" $logfile $STATUS_ENCODING_HLS_ERROR
echo "$(date +'%Y-%m-%d %H:%M:%S') HLS SEGMENT START: $output_prefix.ts" >> $logfile
/usr/local/src/ffmpeg_2_0_1/ffmpeg-2.0.1/ffmpeg -i "$currentdir"raw.ts -c copy -flags global_header -map 0 -f segment -segment_list raw.m3u8 -segment_list_size 0 -segment_time 10 -segment_format mpegts raw-%d.ts && echo "$(date +'%Y-%m-%d %H:%M:%S') HLS SEGMENT SUCCESS" >> $logfile || error_exit "$(date +'%Y-%m-%d %H:%M:%S') HLS SEGMENT FAILED" $logfile $STATUS_ENCODING_HLS_ERROR
echo "$(date +'%Y-%m-%d %H:%M:%S') HLS SEGMENT END" >> $logfile
####READJUST SEGMENT DURATIONS
offset=0
rm -f adj_$output_prefix.m3u8
cp raw.m3u8 adj_$output_prefix.m3u8
####ADJUST LAST SEGMENT DURATION
srcvid=$(/usr/local/bin/ffmpeg -i "$inputfile" 2>&1 | grep "Duration" | cut -d ' ' -f 4 | sed s/,//)
rm -f temp.m3u8
cp raw.m3u8 temp.m3u8
rawts=$(/usr/local/bin/ffmpeg -i raw.ts 2>&1 | grep "Duration" | cut -d ' ' -f 4 | sed s/,//)
tsfiletotal="0"
tsfilesizetotal="0"
for j in {0..100}; do
curfilename=$(echo "raw-$(($j)).ts")
if [ -f $curfilename ]
then
init_vector=`printf '%032x' $j`
echo "Getting info... input: $curfilename "
curduration=$(/usr/local/bin/ffmpeg -i $curfilename 2>&1 | grep "Duration" | cut -d ' ' -f 4 | sed s/,// | sed s/00:00:// )
cursize=$(perl -e 'printf "%d\n" ,(stat(shift))[7]; ' $curfilename)
if [ -z "$curduration" ]
then
echo "ffmpeg failed to get duration"
else
tsfiletotal=$(echo "scale=9; $curduration + $tsfiletotal" | bc)
tsfilesizetotal=$(echo "scale=9; $cursize + $tsfilesizetotal" | bc)
fi
echo "$curduration $tsfiletotal"
eval sed -i 's/$curfilename//g' temp.m3u8
else
echo "Last ts file part found $j-1"
break;
fi
done
sed 's/#EXTM3U//g' temp.m3u8 | sed 's/#EXT-X-VERSION:3//g' | sed 's/#EXT-X-MEDIA-SEQUENCE:0//g' | sed 's/#EXT-X-ALLOWCACHE:1//g' \
| sed 's/#EXTINF://g' | sed 's/#EXT-X-TARGETDURATION:[0-9]\+//g' | sed 's/#EXT-X-ENDLIST//g' | sed 's/,//g' > temp.txt
cat temp.txt
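# the m3u8's #EXTINF durations can overstate the measured stream length, so
# trim the overshoot from the final segment:
#   diff    = (sum of #EXTINF durations) - (sum of measured .ts durations)
#   replace = (last #EXTINF duration) - diff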
m3u8total=$(awk '{ OFMT="%.6f"; sum += $1 } END { print sum }' temp.txt)
lastduration=$(awk '{ OFMT="%.6f"; if( $1 > 0 ) last =$1 } END { print last }' temp.txt)
diff=$(echo "scale=6; $m3u8total - $tsfiletotal" | bc)
replace=$(echo "scale=6; $lastduration - $diff" | bc)
echo "Prefix: $output_prefix"
echo "Src: $2 $srcvid"
echo "Raw TS: $rawts"
echo "M3U8: $m3u8total"
echo "Files: $tsfiletotal"
echo "LastSegment: $lastduration"
echo "Diff: $diff"
echo "Replace with: $replace"
echo -e '\n'
checkneg=$(echo $replace | sed 's/[^-]//g')
if [[ -z "$checkneg" ]]
then
eval sed -i 's/#EXTINF:$lastduration/#EXTINF:$replace/g' adj_$output_prefix.m3u8
fi
####ENCRYPTION
keyfilename=$(echo "$output_prefix.txt")
echo "AES Key: $keyfilename"
openssl rand 16 > $keyfilename
key_as_hex=$(cat $keyfilename | hexdump -e '16/1 "%02x"')
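# per the HLS spec, when the EXT-X-KEY tag carries no IV attribute the
# segment's media sequence number is used as the AES-128-CBC IV; the loop
# below mirrors that by zero-padding the segment index to 16 bytes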
maxtsfiles=$(echo 100)
for j in {0..100}; do
curfilename=$(echo "raw-$(($j)).ts")
if [ -f $curfilename ]
then
init_vector=`printf '%032x' $j`
echo "Encrypting... input: $curfilename output: $3-$(($j)).ts"
openssl aes-128-cbc -e -in $curfilename -out $output_prefix-$(($j)).ts -p -nosalt -iv $init_vector -K $key_as_hex
else
echo "Last ts file part found $j-1"
break;
fi
done
####ENCRYPTED M3U8 CREATION
rm -f $output_prefix.m3u8
cp adj_$output_prefix.m3u8 $output_prefix.m3u8
insertstring=$(echo '3i#EXT-X-KEY:METHOD=AES-128,URI=\"$keyfilename\"')
eval sed -i 's/raw-/$output_prefix-/g' $output_prefix.m3u8
eval sed -i $insertstring $output_prefix.m3u8
echo "$(date +'%Y-%m-%d %H:%M:%S') HLS ENCODE END" >> $logfile
write_status $STATUS_ENCODING_HLS_DONE $logfile
echo "$(date +'%Y-%m-%d %H:%M:%S') HLS_SIZE $tsfilesizetotal" >> "$logfile"
|
mrnastor/chopsuey
|
shell_scripts/encode_hls.sh
|
Shell
|
gpl-2.0
| 5,254 |
#! /bin/sh
# Copyright (C) 2005-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Check that 'make -n' works with the lisp_LISP recover rule.
required='emacs non-root'
. test-init.sh
cat > Makefile.am << 'EOF'
dist_lisp_LISP = am-one.el am-two.el am-three.el
EOF
cat >> configure.ac << 'EOF'
AM_PATH_LISPDIR
AC_OUTPUT
EOF
echo "(require 'am-two)" > am-one.el
echo "(require 'am-three) (provide 'am-two)" > am-two.el
echo "(provide 'am-three)" > am-three.el
$ACLOCAL
$AUTOCONF
$AUTOMAKE --add-missing
./configure
$MAKE
test -f am-one.elc
test -f am-two.elc
test -f am-three.elc
rm -f am-*.elc
chmod a-w .
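# with the build tree read-only, "make -n" must only pretend to rebuild;
# the checks below verify that no .elc file was actually recreated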
$MAKE -n
test ! -e am-one.elc
test ! -e am-two.elc
test ! -e am-three.elc
:
|
komh/automake-os2
|
t/lispdry.sh
|
Shell
|
gpl-2.0
| 1,303 |
STATUS=`defaults read com.apple.finder AppleShowAllFiles`
QUERY="{query}"
if [ "$QUERY" == "off" ];
then
if [ $STATUS == NO ];
then
defaults write com.apple.finder AppleShowAllFiles YES
#killall Finder
killall Finder /System/Library/CoreServices/Finder.app
echo "Show Hidden Files Now"
fi
echo "Already Show Hidden Files"
else
if [ "$QUERY" == "on" ];
then
if [ $STATUS == YES ];
then
defaults write com.apple.finder AppleShowAllFiles NO
#killall Finder
killall Finder /System/Library/CoreServices/Finder.app
echo "Hide Hidden Files Now"
fi
echo "Already Hide Hidden Files"
else
if [ "$QUERY" == "" ];
then
if [ "$STATUS" == "YES" ];
then
echo "Finder Show Hidden Files Now"
else
echo "Finder Don't Show Hidden Files Now"
fi
fi
fi
fi
|
wzxjohn/File-Hidden-Switch-Workflow
|
source/script.sh
|
Shell
|
gpl-2.0
| 853 |
#!/bin/bash
source /etc/kubernetes/controller-manager
source /etc/kubernetes/config
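# hypothetical example of a sourced /etc/kubernetes/controller-manager
# (illustrative value only, not shipped with this image):
#   KUBE_CONTROLLER_MANAGER_ARGS="--leader-elect=true"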
export KUBE_LOGTOSTDERR
export KUBE_LOG_LEVEL
export KUBE_MASTER
export KUBE_CONTROLLER_MANAGER_ARGS
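# exec replaces this wrapper shell with the daemon so it receives signals
# (e.g. from the container runtime) directly rather than through a subshell.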
exec /usr/bin/kube-controller-manager \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_CONTROLLER_MANAGER_ARGS \
"$@"
|
DirectXMan12/Fedora-Dockerfiles
|
kubernetes/controller-manager/launch.sh
|
Shell
|
gpl-2.0
| 326 |
#!/bin/sh
# Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB
# This file is public domain and comes with NO WARRANTY of any kind
# MySQL daemon start/stop script.
# Usually this is put in /etc/init.d (at least on machines SYSV R4 based
# systems) and linked to /etc/rc3.d/S99mysql and /etc/rc0.d/K01mysql.
# When this is done the mysql server will be started when the machine is
# started and shut down when the systems goes down.
# Comments to support chkconfig on RedHat Linux
# chkconfig: 2345 64 36
# description: A very fast and reliable SQL database engine.
# Comments to support LSB init script conventions
### BEGIN INIT INFO
# Provides: mysql
# Required-Start: $local_fs $network $remote_fs
# Should-Start: ypbind nscd ldap ntpd xntpd
# Required-Stop: $local_fs $network $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: start and stop MySQL
# Description: MySQL is a very fast and reliable SQL database engine.
### END INIT INFO
# If you install MySQL on some other places than @prefix@, then you
# have to do one of the following things for this script to work:
#
# - Run this script from within the MySQL installation directory
# - Create a /etc/my.cnf file with the following information:
# [mysqld]
# basedir=<path-to-mysql-installation-directory>
# - Add the above to any other configuration file (for example ~/.my.ini)
# and copy my_print_defaults to /usr/bin
# - Add the path to the mysql-installation-directory to the basedir variable
# below.
#
# If you want to affect other MySQL variables, you should make your changes
# in the /etc/my.cnf, ~/.my.cnf or other MySQL configuration files.
# If you change base dir, you must also change datadir. These may get
# overwritten by settings in the MySQL configuration files.
basedir=
datadir=
# Default value, in seconds, after which the script should timeout waiting
# for server start.
# Value here is overridden by value in my.cnf.
# 0 means don't wait at all
# Negative numbers mean to wait indefinitely
service_startup_timeout=900
# Lock directory for RedHat / SuSE.
lockdir='/var/lock/subsys'
lock_file_path="$lockdir/mysql"
# The following variables are only set for letting mysql.server find things.
# Set some defaults
mysqld_pid_file_path=
if test -z "$basedir"
then
basedir=@prefix@
bindir=@bindir@
if test -z "$datadir"
then
datadir=@localstatedir@
fi
sbindir=@sbindir@
libexecdir=@libexecdir@
else
bindir="$basedir/bin"
if test -z "$datadir"
then
datadir="$basedir/data"
fi
sbindir="$basedir/sbin"
libexecdir="$basedir/libexec"
fi
# datadir_set is used to determine if datadir was set (and so should be
# *not* set inside of the --basedir= handler.)
datadir_set=
#
# Use LSB init script functions for printing messages, if possible
#
lsb_functions="/lib/lsb/init-functions"
if test -f $lsb_functions ; then
. $lsb_functions
else
log_success_msg()
{
echo " SUCCESS! $@"
}
log_failure_msg()
{
echo " ERROR! $@"
}
fi
PATH="/sbin:/usr/sbin:/bin:/usr/bin:$basedir/bin"
export PATH
mode=$1 # start or stop
[ $# -ge 1 ] && shift
other_args="$*" # uncommon, but needed when called from an RPM upgrade action
# Expected: "--skip-networking --skip-grant-tables"
# They are not checked here, intentionally, as it is the responsibility
# of the "spec" file author to give correct arguments only.
case `echo "testing\c"`,`echo -n testing` in
*c*,-n*) echo_n= echo_c= ;;
*c*,*) echo_n=-n echo_c= ;;
*) echo_n= echo_c='\c' ;;
esac
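# Portability probe: SysV-style echo honors "\c" to suppress the trailing
# newline, BSD-style echo needs -n; later lines use: echo $echo_n "text$echo_c"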
parse_server_arguments() {
for arg do
case "$arg" in
--basedir=*) basedir=`echo "$arg" | sed -e 's/^[^=]*=//'`
bindir="$basedir/bin"
if test -z "$datadir_set"; then
datadir="$basedir/data"
fi
sbindir="$basedir/sbin"
libexecdir="$basedir/libexec"
;;
--datadir=*) datadir=`echo "$arg" | sed -e 's/^[^=]*=//'`
datadir_set=1
;;
--pid-file=*) mysqld_pid_file_path=`echo "$arg" | sed -e 's/^[^=]*=//'` ;;
--service-startup-timeout=*) service_startup_timeout=`echo "$arg" | sed -e 's/^[^=]*=//'` ;;
esac
done
}
wait_for_pid () {
verb="$1" # created | removed
pid="$2" # process ID of the program operating on the pid-file
pid_file_path="$3" # path to the PID file.
i=0
avoid_race_condition="by checking again"
while test $i -ne $service_startup_timeout ; do
case "$verb" in
'created')
# wait for a PID-file to pop into existence.
test -s "$pid_file_path" && i='' && break
;;
'removed')
# wait for this PID-file to disappear
test ! -s "$pid_file_path" && i='' && break
;;
*)
echo "wait_for_pid () usage: wait_for_pid created|removed pid pid_file_path"
exit 1
;;
esac
# if server isn't running, then pid-file will never be updated
if test -n "$pid"; then
if kill -0 "$pid" 2>/dev/null; then
: # the server still runs
else
# The server may have exited between the last pid-file check and now.
if test -n "$avoid_race_condition"; then
avoid_race_condition=""
continue # Check again.
fi
# there's nothing that will affect the file.
log_failure_msg "The server quit without updating PID file ($pid_file_path)."
return 1 # not waiting any more.
fi
fi
echo $echo_n ".$echo_c"
i=`expr $i + 1`
sleep 1
done
if test -z "$i" ; then
log_success_msg
return 0
else
log_failure_msg
return 1
fi
}
# Get arguments from the my.cnf file,
# the only group, which is read from now on is [mysqld]
if test -x ./bin/my_print_defaults
then
print_defaults="./bin/my_print_defaults"
elif test -x $bindir/my_print_defaults
then
print_defaults="$bindir/my_print_defaults"
elif test -x $bindir/mysql_print_defaults
then
print_defaults="$bindir/mysql_print_defaults"
else
# Try to find basedir in /etc/my.cnf
conf=/etc/my.cnf
print_defaults=
if test -r $conf
then
subpat='^[^=]*basedir[^=]*=\(.*\)$'
dirs=`sed -e "/$subpat/!d" -e 's//\1/' $conf`
for d in $dirs
do
d=`echo $d | sed -e 's/[ ]//g'`
if test -x "$d/bin/my_print_defaults"
then
print_defaults="$d/bin/my_print_defaults"
break
fi
if test -x "$d/bin/mysql_print_defaults"
then
print_defaults="$d/bin/mysql_print_defaults"
break
fi
done
fi
# Hope it's in the PATH ... but I doubt it
test -z "$print_defaults" && print_defaults="my_print_defaults"
fi
#
# Read defaults file from 'basedir'. If there is no defaults file there
# check if it's in the old (deprecated) place (datadir) and read it from there
#
extra_args=""
if test -r "$basedir/my.cnf"
then
extra_args="-e $basedir/my.cnf"
fi
parse_server_arguments `$print_defaults $extra_args mysqld server mysql_server mysql.server`
#
# Set pid file if not given
#
if test -z "$mysqld_pid_file_path"
then
mysqld_pid_file_path=$datadir/`@HOSTNAME@`.pid
else
case "$mysqld_pid_file_path" in
/* ) ;;
* ) mysqld_pid_file_path="$datadir/$mysqld_pid_file_path" ;;
esac
fi
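# e.g. (hypothetical values) with datadir=/var/lib/mysql on host "db1" the
# default pid file becomes /var/lib/mysql/db1.pid; a relative --pid-file is
# likewise anchored inside $datadir.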
case "$mode" in
'start')
# Start daemon
# Safeguard (relative paths, core dumps..)
cd $basedir
echo $echo_n "Starting MySQL"
if test -x $bindir/mysqld_safe
then
# Give extra arguments to mysqld with the my.cnf file. This script
# may be overwritten at next upgrade.
$bindir/mysqld_safe --datadir="$datadir" --pid-file="$mysqld_pid_file_path" $other_args >/dev/null &
wait_for_pid created "$!" "$mysqld_pid_file_path"; return_value=$?
# Make lock for RedHat / SuSE
if test -w "$lockdir"
then
touch "$lock_file_path"
fi
exit $return_value
else
log_failure_msg "Couldn't find MySQL server ($bindir/mysqld_safe)"
fi
;;
'stop')
# Stop daemon. We use a signal here to avoid having to know the
# root password.
if test -s "$mysqld_pid_file_path"
then
# signal mysqld_safe that it needs to stop
touch "$mysqld_pid_file_path.shutdown"
mysqld_pid=`cat "$mysqld_pid_file_path"`
if (kill -0 $mysqld_pid 2>/dev/null)
then
echo $echo_n "Shutting down MySQL"
kill $mysqld_pid
# mysqld should remove the pid file when it exits, so wait for it.
wait_for_pid removed "$mysqld_pid" "$mysqld_pid_file_path"; return_value=$?
else
log_failure_msg "MySQL server process #$mysqld_pid is not running!"
rm "$mysqld_pid_file_path"
fi
# Delete lock for RedHat / SuSE
if test -f "$lock_file_path"
then
rm -f "$lock_file_path"
fi
exit $return_value
else
log_failure_msg "MySQL server PID file could not be found!"
fi
;;
'restart')
# Stop the service and regardless of whether it was
# running or not, start it again.
if $0 stop $other_args; then
$0 start $other_args
else
log_failure_msg "Failed to stop running server, so refusing to try to start."
exit 1
fi
;;
'reload'|'force-reload')
if test -s "$mysqld_pid_file_path" ; then
read mysqld_pid < "$mysqld_pid_file_path"
kill -HUP $mysqld_pid && log_success_msg "Reloading service MySQL"
touch "$mysqld_pid_file_path"
else
log_failure_msg "MySQL PID file could not be found!"
exit 1
fi
;;
'status')
# First, check to see if pid file exists
if test -s "$mysqld_pid_file_path" ; then
read mysqld_pid < "$mysqld_pid_file_path"
if kill -0 $mysqld_pid 2>/dev/null ; then
log_success_msg "MySQL running ($mysqld_pid)"
exit 0
else
log_failure_msg "MySQL is not running, but PID file exists"
exit 1
fi
else
# Try to find appropriate mysqld process
mysqld_pid=`pidof $libexecdir/mysqld`
# test if multiple pids exist
pid_count=`echo $mysqld_pid | wc -w`
if test $pid_count -gt 1 ; then
log_failure_msg "Multiple MySQL running but PID file could not be found ($mysqld_pid)"
exit 5
elif test -z $mysqld_pid ; then
if test -f "$lock_file_path" ; then
log_failure_msg "MySQL is not running, but lock file ($lock_file_path) exists"
exit 2
fi
log_failure_msg "MySQL is not running"
exit 3
else
log_failure_msg "MySQL is running but PID file could not be found"
exit 4
fi
fi
;;
*)
# usage
basename=`basename "$0"`
echo "Usage: $basename {start|stop|restart|reload|force-reload|status} [ MySQL server options ]"
exit 1
;;
esac
exit 0
|
m4734/mysql_pio
|
support-files/mysql.server.sh
|
Shell
|
gpl-2.0
| 10,835 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2009-2022 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# If an explicit file is missing, an error should happen
. ./tup.sh
cat > Tupfile << HERE
: foo.c |> echo gcc -c %f |>
HERE
# Note: Not touching foo.c
update_fail
eotup
|
gittup/tup
|
test/t2006-missing-file.sh
|
Shell
|
gpl-2.0
| 920 |
#!/bin/bash
# Try to create JIS-encoded channel containing ',' to see if JIS support works as expected
# $Id: jistest.sh 99 2010-01-19 22:14:36Z karel.tuma $
if (echo "USER sd 0 sd :sd"; echo "NICK sd--"; /bin/echo -e "JOIN #\033\$Btest,test2\033(B"; echo QUIT) | nc localhost 6607 | grep "test,test"; then
echo "JP test succeeded"
else
echo "JP test failed"
fi
|
asterIRC/ircd-ratbox-ircnet
|
contrib/testnet/jistest.sh
|
Shell
|
gpl-2.0
| 366 |
#!/bin/sh
# $Id: findversion.sh 22447 2011-05-13 17:52:35Z rubidium $
# This file is part of OpenCoaster Tycoon.
# OpenCoaster Tycoon is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 2.
# OpenCoaster Tycoon is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OpenCoaster Tycoon. If not, see <http://www.gnu.org/licenses/>.
# Arguments given? Show help text.
if [ "$#" != "0" ]; then
cat <<EOF
Usage: ./findversion.sh
Finds the current revision and if the code is modified.
Output: <REV>\t<REV_NR>\t<MODIFIED>\t<CLEAN_REV>
REV
a string describing what version of the code the current checkout is
based on. The exact format of this string depends on the version
control system in use, but it tries to identify the revision used as
close as possible (using the svn revision number or hg/git hash).
This also includes an indication of whether the checkout was
modified and which branch was checked out. This value is not
guaranteed to be sortable, but is mainly meant for identifying the
revision and user display.
If no revision identifier could be found, this is left empty.
REV_NR
the revision number of the svn revision this checkout is based on.
This can be used to determine which functionality is present in this
checkout. For trunk svn checkouts and hg/git branches based upon it,
this number should be accurate. For svn branch checkouts, this
number is mostly meaningless, at least when comparing with the
REV_NR from other branches or trunk.
This number should be sortable. Within a given branch or trunk, a
higher number means a newer version. However, when using git or hg,
this number will not increase on new commits.
If no revision number could be found, this is left empty.
MODIFIED
Whether (the src directory of) this checkout is modified or not. A
value of 0 means not modified, a value of 2 means it was modified.
Modification is determined in relation to the commit identified by
REV, so not in relation to the svn revision identified by REV_NR.
A value of 1 means that the modified status is unknown, because this
is not an svn/git/hg checkout for example.
CLEAN_REV
the same as REV but without branch name
By setting the AWK environment variable, a caller can determine which
version of "awk" is used. If nothing is set, this script defaults to
"awk".
EOF
exit 1;
fi
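# Illustrative output (values are examples) for a locally modified git
# checkout of branch "fixes" based on svn r22447:
#   g1a2b3c4M-fixes 22447 2 g1a2b3c4M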
# Allow awk to be provided by the caller.
if [ -z "$AWK" ]; then
AWK=awk
fi
# Find out some dirs
cd `dirname "$0"`
ROOT_DIR=`pwd`
# Determine if we are using a modified version
# Assume the dir is not modified
MODIFIED="0"
if [ -d "$ROOT_DIR/.svn" ]; then
# We are an svn checkout
if [ -n "`svnversion | grep 'M'`" ]; then
MODIFIED="2"
fi
# Find the revision like: rXXXXM-branch
BRANCH=`LC_ALL=C svn info | "$AWK" '/^URL:.*branches/ { split($2, a, "/"); for(i in a) if (a[i]=="branches") { print a[i+1]; break } }'`
TAG=`LC_ALL=C svn info | "$AWK" '/^URL:.*tags/ { split($2, a, "/"); for(i in a) if (a[i]=="tags") { print a[i+1]; break } }'`
REV_NR=`LC_ALL=C svn info | "$AWK" '/^Last Changed Rev:/ { print $4 }'`
if [ -n "$TAG" ]; then
REV=$TAG
else
REV="r$REV_NR"
fi
elif [ -d "$ROOT_DIR/.git" ]; then
# We are a git checkout
# Refresh the index to make sure file stat info is in sync, then look for modifications
git update-index --refresh >/dev/null
if [ -n "`git diff-index HEAD`" ]; then
MODIFIED="2"
fi
HASH=`LC_ALL=C git rev-parse --verify HEAD 2>/dev/null`
REV="g`echo $HASH | cut -c1-8`"
BRANCH="`git symbolic-ref -q HEAD 2>/dev/null | sed 's@.*/@@;s@^master$@@'`"
REV_NR=`LC_ALL=C git log --pretty=format:%s --grep="^(svn r[0-9]*)" -1 | sed "s@.*(svn r\([0-9]*\)).*@\1@"`
if [ -z "$REV_NR" ]; then
# No rev? Maybe it is a custom git-svn clone
REV_NR=`LC_ALL=C git log --pretty=format:%b --grep="git-svn-id:.*@[0-9]*" -1 | sed "s@.*\@\([0-9]*\).*@\1@"`
fi
TAG="`git name-rev --name-only --tags --no-undefined HEAD 2>/dev/null | sed 's@\^0$@@'`"
if [ -n "$TAG" ]; then
BRANCH=""
REV="$TAG"
fi
elif [ -d "$ROOT_DIR/.hg" ]; then
# We are a hg checkout
if [ -n "`hg status | grep -v '^?'`" ]; then
MODIFIED="2"
fi
HASH=`LC_ALL=C hg id -i | cut -c1-12`
REV="h`echo $HASH | cut -c1-8`"
BRANCH="`hg branch | sed 's@^default$@@'`"
TAG="`hg id -t | grep -v 'tip$'`"
if [ -n "$TAG" ]; then
BRANCH=""
REV="$TAG"
fi
REV_NR=`LC_ALL=C hg log -f -k "(svn r" -l 1 --template "{desc|firstline}\n" | grep "^(svn r[0-9]*)" | sed "s@.*(svn r\([0-9]*\)).*@\1@"`
if [ -z "$REV_NR" ]; then
# No rev? Maybe it is a custom hgsubversion clone
REV_NR=`LC_ALL=C hg parent --template="{svnrev}"`
fi
elif [ -f "$ROOT_DIR/.ottdrev" ]; then
# We are an exported source bundle
cat $ROOT_DIR/.ottdrev
exit
else
# We don't know
MODIFIED="1"
BRANCH=""
REV=""
REV_NR=""
fi
if [ "$MODIFIED" -eq "2" ]; then
REV="${REV}M"
fi
CLEAN_REV=${REV}
if [ -n "$BRANCH" ]; then
REV="${REV}-$BRANCH"
fi
echo "$REV $REV_NR $MODIFIED $CLEAN_REV"
|
ShaunOfTheLive/OpenCoasterTycoon
|
findversion.sh
|
Shell
|
gpl-2.0
| 5,493 |
#!/usr/bin/env sh
# THIS IS AN AUTO-GENERATED FILE
# IT IS UNLIKELY YOU WANT TO EDIT THIS FILE BY HAND
# IF YOU WANT TO CHANGE THE ROS ENVIRONMENT VARIABLES
# USE THE rosinstall OR rosws TOOL INSTEAD.
# Generator version: 0.7.5
# see: http://www.ros.org/wiki/rosinstall
# This setup.sh file has to parse .rosinstall file, and source similar
# setup.sh files recursively. In the course of recursion, shell
# variables get overwritten. This means that when returning from
# recursion, any variable may be in a different state
# These variables accumulate data through recursion and must only be
# reset and unset at the top level of recursion.
if [ x"$_ROSINSTALL_IN_RECURSION" != x"recurse" ] ; then
# reset setupfile accumulator
_SETUPFILES_ROSINSTALL=
_ROS_PACKAGE_PATH_ROSINSTALL=
# reset RPP before sourcing other setup files
export ROS_PACKAGE_PATH=
fi
export ROS_WORKSPACE=/home/jgflores/code/Golfred/golem/jade_workspace
if [ ! "$ROS_MASTER_URI" ] ; then export ROS_MASTER_URI=http://localhost:11311 ; fi
unset ROS_ROOT
unset _SETUP_SH_ERROR
# python script to read .rosinstall even when rosinstall is not installed
# this files parses the .rosinstall and sets environment variables accordingly
# The ROS_PACKAGE_PATH contains all elements in reversed order (for historic reasons)
# We store into _PARSED_CONFIG the result of python code,
# which is the ros_package_path and the list of setup_files to source
# Using python here to benefit from the pyyaml library
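# The printed value looks like (paths are examples):
#   /ws/stacks/b:/ws/stacks/aROSINSTALL_PATH_SETUPFILE_SEPARATOR/opt/ros/setup.sh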
export _PARSED_CONFIG=`/usr/bin/env python << EOPYTHON
import sys
import os
import yaml
workspace_path = os.environ.get('ROS_WORKSPACE', os.path.abspath('.'))
filename = os.path.join(workspace_path, '.rosinstall')
if not os.path.isfile(filename):
print('ERROR')
sys.exit("There is no file at %s" % filename)
with open(filename, "r") as fhand:
try:
v = fhand.read();
except Exception as e:
print('ERROR')
sys.exit("Failed to read file: %s %s " % (filename, str(e)))
try:
y = yaml.load(v);
except Exception as e:
print('ERROR')
sys.exit("Invalid yaml in %s: %s " % (filename, str(e)))
if y is not None:
# put all non-setupfile entries into ROS_PACKAGE_PATH
paths = []
for vdict in y:
for k, v in vdict.items():
if v is not None and k != "setup-file":
path = os.path.join(workspace_path, v['local-name'])
if not os.path.isfile(path):
# add absolute path from workspace to relative paths
paths.append(os.path.normpath(path))
else:
print('ERROR')
sys.exit("ERROR: referenced path is a file, not a folder: %s" % path)
output = ''
# add paths in reverse order
if len(paths) > 0:
output += ':'.join(reversed(paths))
# We also want to return the location of any setupfile elements
output += 'ROSINSTALL_PATH_SETUPFILE_SEPARATOR'
setupfile_paths = []
for vdict in y:
for k, v in vdict.items():
if v is not None and k == "setup-file":
path = os.path.join(workspace_path, v['local-name'])
if not os.path.exists(path):
print('ERROR')
sys.exit("WARNING: referenced setupfile does not exist: %s" % path)
elif os.path.isfile(path):
setupfile_paths.append(path)
else:
print('ERROR')
sys.exit("ERROR: referenced setupfile is a folder: %s" % path)
output += ':'.join(setupfile_paths)
# printing will store the result in the variable
print(output)
EOPYTHON`
if [ x"$_PARSED_CONFIG" = x"ERROR" ]; then
echo 'Could not parse .rosinstall file' 1>&2
_SETUP_SH_ERROR=1
fi
# using sed to split up ros_package_path and setupfile results
_ROS_PACKAGE_PATH_ROSINSTALL_NEW=`echo "$_PARSED_CONFIG" | sed 's,\(.*\)ROSINSTALL_PATH_SETUPFILE_SEPARATOR\(.*\),\1,'`
if [ ! -z "$_ROS_PACKAGE_PATH_ROSINSTALL_NEW" ]; then
if [ ! -z "$_ROS_PACKAGE_PATH_ROSINSTALL" ]; then
export _ROS_PACKAGE_PATH_ROSINSTALL=$_ROS_PACKAGE_PATH_ROSINSTALL:$_ROS_PACKAGE_PATH_ROSINSTALL_NEW
else
export _ROS_PACKAGE_PATH_ROSINSTALL=$_ROS_PACKAGE_PATH_ROSINSTALL_NEW
fi
fi
_SETUPFILES_ROSINSTALL_NEW=`echo "$_PARSED_CONFIG" | sed 's,\(.*\)'ROSINSTALL_PATH_SETUPFILE_SEPARATOR'\(.*\),\2,'`
if [ ! -z "$_SETUPFILES_ROSINSTALL_NEW" ]; then
if [ ! -z "$_SETUPFILES_ROSINSTALL" ]; then
_SETUPFILES_ROSINSTALL=$_SETUPFILES_ROSINSTALL_NEW:$_SETUPFILES_ROSINSTALL
else
_SETUPFILES_ROSINSTALL=$_SETUPFILES_ROSINSTALL_NEW
fi
fi
unset _PARSED_CONFIG
# colon separates entries
_LOOP_SETUP_FILE=`echo $_SETUPFILES_ROSINSTALL | sed 's,\([^:]*\)[:]\(.*\),\1,'`
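# e.g. (illustrative) "a/setup.sh:b/setup.sh" yields "a/setup.sh"; with a
# single entry and no colon the sed pattern does not match, so the whole
# value passes through unchanged.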
# this loop does fake recursion, as the called setup.sh may work on
# the remaining elements in the _SETUPFILES_ROSINSTALL stack
while [ ! -z "$_LOOP_SETUP_FILE" ]
do
# need to pop from stack before recursing, as chained setup.sh might rely on this
_SETUPFILES_ROSINSTALL=`echo $_SETUPFILES_ROSINSTALL | sed 's,\([^:]*[:]*\),,'`
if [ -f "$_LOOP_SETUP_FILE" ]; then
_ROSINSTALL_IN_RECURSION=recurse
. $_LOOP_SETUP_FILE
unset _ROSINSTALL_IN_RECURSION
else
echo warn: no such file : "$_LOOP_SETUP_FILE"
fi
_LOOP_SETUP_FILE=`echo $_SETUPFILES_ROSINSTALL | sed 's,\([^:]*\)[:]\(.*\),\1,'`
done
unset _LOOP_SETUP_FILE
unset _SETUPFILES_ROSINSTALL
# prepend elements from .rosinstall file to ROS_PACKAGE_PATH
# ignoring duplicates entries from value set by setup files
export ROS_PACKAGE_PATH=`/usr/bin/env python << EOPYTHON
import os
ros_package_path = os.environ.get('ROS_PACKAGE_PATH', '')
original_elements = ros_package_path.split(':')
ros_package_path2 = os.environ.get('_ROS_PACKAGE_PATH_ROSINSTALL', '')
new_elements = ros_package_path2.split(':')
new_elements = [path for path in new_elements if path]
for original_path in original_elements:
if original_path and original_path not in new_elements:
new_elements.append(original_path)
print(':'.join(new_elements))
EOPYTHON`
unset _ROS_PACKAGE_PATH_ROSINSTALL
# restore ROS_WORKSPACE in case other setup.sh changed/unset it
export ROS_WORKSPACE=/home/jgflores/code/Golfred/golem/jade_workspace
# if setup.sh did not set ROS_ROOT (pre-fuerte)
if [ -z "${ROS_ROOT}" ]; then
# using ROS_ROOT now being in ROS_PACKAGE_PATH
export _ROS_ROOT_ROSINSTALL=`/usr/bin/env python << EOPYTHON
import sys, os;
if 'ROS_PACKAGE_PATH' in os.environ:
pkg_path = os.environ['ROS_PACKAGE_PATH']
for path in pkg_path.split(':'):
if (os.path.basename(path) == 'ros'
and os.path.isfile(os.path.join(path, 'stack.xml'))):
print(path)
break
EOPYTHON`
if [ ! -z "${_ROS_ROOT_ROSINSTALL}" ]; then
export ROS_ROOT=$_ROS_ROOT_ROSINSTALL
export PATH=$ROS_ROOT/bin:$PATH
export PYTHONPATH=$ROS_ROOT/core/roslib/src:$PYTHONPATH
fi
unset _ROS_ROOT_ROSINSTALL
fi
if [ ! -z "$_SETUP_SH_ERROR" ]; then
# return failure code when sourcing file
false
fi
|
rcln/Golfred
|
golem/jade_workspace/setup.sh
|
Shell
|
gpl-2.0
| 6,802 |
# Installing the virtualbox guest additions
VBOX_VERSION=$(cat $HOME/.vbox_version)
cd /tmp
mount -o loop $HOME/VBoxGuestAdditions_$VBOX_VERSION.iso /mnt
sh /mnt/VBoxLinuxAdditions.run
umount /mnt
rm -rf $HOME/VBoxGuestAdditions_*.iso
|
mvj3/install
|
definitions/hadoop_centos/virtualbox.sh
|
Shell
|
gpl-2.0
| 236 |
#!/bin/bash
# TRL = 4
#set -x
base_dir=$PWD
# Common parameters
CABECERA_H1=9040
CABECERA_H2=4528
CABECERA_H3=2272
CABECERA_H4=1144
CABECERA_L=1144
PICTURES=129
# Video parameters
parametrosCIFx30 () {
X_DIM=352
Y_DIM=288
FPS=30
BLOCK_SIZE=152064
DURATION=`echo "$PICTURES/$FPS" | bc -l` # seconds
}
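# BLOCK_SIZE is the size of one 4:2:0 YUV frame in bytes: X_DIM * Y_DIM * 1.5
# (e.g. 352 * 288 * 1.5 = 152064 for CIF).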
parametros4CIFx30 () {
X_DIM=704
Y_DIM=576
FPS=30
BLOCK_SIZE=608256
DURATION=`echo "$PICTURES/$FPS" | bc -l`
}
parametros720px50 () { # 736 p
X_DIM=1280
Y_DIM=736
FPS=50
BLOCK_SIZE=1413120
DURATION=`echo "$PICTURES/$FPS" | bc -l`
}
parametros1080px50 () { # 1088 p
X_DIM=1920
Y_DIM=1088
FPS=50
BLOCK_SIZE=3133440
DURATION=`echo "$PICTURES/$FPS" | bc -l`
}
parametros1080px25 () { # 1088 p
X_DIM=1920
Y_DIM=1088
FPS=25
BLOCK_SIZE=3133440
DURATION=`echo "$PICTURES/$FPS" | bc -l`
}
parametrosCIFx30
#parametros4CIFx30
#parametros720px50
#parametros1080px50
#parametros1080px25
VIDEO=crew_352x288x30x420x300.yuv
carpeta="codestream_3/data-FD-MCJ2K-buscaRate_129-crew_352x288x30x420x300.sh_44000.32.32.4.2.2.4.3"
carpeta2="codestream_3/data-FD-MCJ2K-buscaRate_129-crew_352x288x30x420x300.sh_44000.32.32.1.2.2.4.3"
############################## ##############################
# FUNCTIONS #
############################## ##############################
slopesVariables_aTRL(){
# INPUT SLOPES
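# ("comentario" = "comment": a throwaway function definition used throughout
# this script to disable the enclosed block of code without deleting it)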
comentario(){
slope_H1=$1
slope_L=$2
}
slope_H1=$1
slope_H2=$2
slope_H3=$3
slope_L=$4
# FOLDER 1: ENCODING WITH n SLOPES
cd $base_dir/$carpeta
comentario(){
mcj2k texture_compress_hfb_j2k --file=high_1 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_H1 --subband=1 --temporal_levels=2 --spatial_levels=3
mcj2k texture_compress_lfb_j2k --file=low_1 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_L --temporal_levels=2 --spatial_levels=3
mcj2k texture_compress_hfb_j2k --file=high_1 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_H1 --subband=1 --temporal_levels=5 --spatial_levels=3
mcj2k texture_compress_hfb_j2k --file=high_2 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_H2 --subband=2 --temporal_levels=5 --spatial_levels=3
mcj2k texture_compress_hfb_j2k --file=high_3 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_H3 --subband=3 --temporal_levels=5 --spatial_levels=3
mcj2k texture_compress_hfb_j2k --file=high_4 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_H4 --subband=4 --temporal_levels=5 --spatial_levels=3
mcj2k texture_compress_lfb_j2k --file=low_4 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_L --temporal_levels=5 --spatial_levels=3
}
# !
mcj2k texture_compress_hfb_j2k --file=high_1 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_H1 --subband=1 --temporal_levels=4 --spatial_levels=3
mcj2k texture_compress_hfb_j2k --file=high_2 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_H2 --subband=2 --temporal_levels=4 --spatial_levels=3
mcj2k texture_compress_hfb_j2k --file=high_3 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_H3 --subband=3 --temporal_levels=4 --spatial_levels=3
mcj2k texture_compress_lfb_j2k --file=low_3 --pictures=129 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_L --temporal_levels=4 --spatial_levels=3
rm -rf tmp; mkdir tmp ; cp *.mjc *type* tmp/
# mkdir $base_dir/tmps/tmp.$slope_H1.$slope_H2.$slope_H3.$slope_H4.$slope_L ; cp *.mjc $base_dir/tmps/tmp.$slope_H1.$slope_H2.$slope_H3.$slope_H4.$slope_L
mkdir $base_dir/tmps/tmp.$slope_H1.$slope_H2.$slope_H3.$slope_L ; cp *.mjc $base_dir/tmps/tmp.$slope_H1.$slope_H2.$slope_H3.$slope_L
cd tmp/
# info = compute the bitrate in kbps
TotalBytes=0
for Bytes in `ls -lR | grep -v ^d | awk '{print $5}'`; do
let TotalBytes=$TotalBytes+$Bytes
done
kbps=`echo "$TotalBytes*8/$DURATION/1000" | bc -l`
# expand
# !
mcj2k expand --block_overlaping=2 --block_size=32 --block_size_min=32 --layers=10 --pictures=129 --temporal_levels=4 --pixels_in_x=352 --pixels_in_y=288 --subpixel_accuracy=2 --search_range=4
# RD
RMSE=`snr --block_size=$BLOCK_SIZE --file_A=$DATA/VIDEOS/$VIDEO --file_B=low_0 2> /dev/null | grep RMSE | cut -f 3`
# Result to pesos.dat:
slope_prop_1=`echo "$slope_H1/$slope_H2" | bc -l`
slope_prop_2=`echo "$slope_H2/$slope_H3" | bc -l`
slope_prop_3=`echo "$slope_H3/$slope_L" | bc -l`
# slope_prop_3=`echo "$slope_H3/$slope_H4" | bc -l`
# slope_prop_4=`echo "$slope_H4/$slope_L" | bc -l`
if [ $RMSE_anterior = 0 ] ; then
pendiente=0
else
diferencia_RMSE=`echo "$RMSE_anterior-$RMSE" | bc -l`
diferencia_kbps=`echo "$kbps-$kbps_anterior" | bc -l`
if [ $diferencia_kbps != 0 ] ; then
pendiente=`echo "$diferencia_RMSE/$diferencia_kbps" | bc -l`
fi
fi
RMSE_anterior=$RMSE
kbps_anterior=$kbps
# cd $base_dir/tmps/tmp.$slope_H1.$slope_H2.$slope_H3.$slope_H4.$slope_L
cd $base_dir/tmps/tmp.$slope_H1.$slope_H2.$slope_H3.$slope_L
ls -l >> $base_dir/pesos_$subbanda.dat
ls -l > $base_dir/pantalla_$subbanda.dat
# grab the file sizes
peso_H1=`ls -l high_1.mjc | awk '{print $5}'` #peso_L=`ls -l low_1.mjc | awk '{print $5}'`
peso_H2=`ls -l high_2.mjc | awk '{print $5}'`
peso_H3=`ls -l high_3.mjc | awk '{print $5}'`
peso_L=`ls -l low_3.mjc | awk '{print $5}'`
# peso_H4=`ls -l high_4.mjc | awk '{print $5}'`
# peso_L=`ls -l low_4.mjc | awk '{print $5}'`
# subtract the headers
let peso_H1=$peso_H1-$CABECERA_H1
let peso_H2=$peso_H2-$CABECERA_H2
let peso_H3=$peso_H3-$CABECERA_H3
# let peso_H4=$peso_H4-$CABECERA_H4
let peso_L=$peso_L-$CABECERA_L
# rm -rf $base_dir/tmps/tmp.$slope_H1.$slope_H2.$slope_H3.$slope_H4.$slope_L
rm -rf $base_dir/tmps/tmp.$slope_H1.$slope_H2.$slope_H3.$slope_L
# FOLDER 2: ENCODING WITH 1 SLOPE
# 1 TRL for the 1st frame
cd $base_dir/$carpeta2
# !
mcj2k compress --block_overlaping=2 --block_size=32 --block_size_min=32 --slopes=$slope_L --pictures=1 --temporal_levels=1 --pixels_in_x=352 --pixels_in_y=288 --subpixel_accuracy=2 --search_range=4 --spatial_levels=3
mcj2k texture_compress --pictures=1 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_L --temporal_levels=1 --spatial_levels=3
mcj2k texture_compress_lfb_j2k --file=low_0 --pictures=1 --pixels_in_x=352 --pixels_in_y=288 --slopes=$slope_L --temporal_levels=1 --spatial_levels=3
# compute sizes and ratios
frame1=`ls -l low_0.mjc | awk '{print $5}'`
let L_sinframe1=$peso_L-$frame1
peso_prop_1=`echo "$peso_H1/$peso_H2" | bc -l`
peso_prop_2=`echo "$peso_H2/$peso_H3" | bc -l`
peso_prop_3=`echo "$peso_H3/$L_sinframe1" | bc -l`
# peso_prop_3=`echo "$peso_H3/$peso_H4" | bc -l`
# peso_prop_4=`echo "$peso_H4/$L_sinframe1" | bc -l`
# Results to pesos.dat:
# echo "PESOS: frame_1: $frame1 low.mcj_sin_frame_1: $L_sinframe1 pesos_prop: $peso_prop_1 $peso_prop_2 $peso_prop_3 $peso_prop_4" >> $base_dir/pesos_$subbanda.dat
# echo -e "SLOPES: $slope_H1 $slope_H2 $slope_H3 $slope_H4 $slope_L \t slope_prop: $slope_prop_1 $slope_prop_2 $slope_prop_3 $slope_prop_4 \t PENDIENTE: $pendiente">> $base_dir/pesos_$subbanda.dat
echo "PESOS: frame_1: $frame1 low.mcj_sin_frame_1: $L_sinframe1 pesos_prop: $peso_prop_1 $peso_prop_2 $peso_prop_3" >> $base_dir/pesos_$subbanda.dat
echo -e "SLOPES: $slope_H1 $slope_H2 $slope_H3 $slope_L \t slope_prop: $slope_prop_1 $slope_prop_2 $slope_prop_3 \t PENDIENTE: $pendiente">> $base_dir/pesos_$subbanda.dat
echo -e "kbps= $kbps\tRMSE= $RMSE \t PENDIENTE: $pendiente">> $base_dir/pesos_$subbanda.dat
echo " " >> $base_dir/pesos_$subbanda.dat
echo " " >> $base_dir/pesos_$subbanda.dat
# Results to coef.dat:
echo -e $slope_H1 $slope_H2 $slope_H3 $slope_L '\t\t' $kbps '\t' $RMSE '\t\t' $slope_prop_1 $slope_prop_2 $slope_prop_3 '\t\t' $pendiente >> $base_dir/coef_$subbanda.dat
# Results to pantalla.dat:
cat $base_dir/pantalla_$subbanda.dat
echo -e "SLOPES: $slope_H1 $slope_H2 $slope_H3 $slope_L \n slope_prop: $slope_prop_1 $slope_prop_2 $slope_prop_3 \n PENDIENTE: $pendiente"
echo " "; echo " "; echo " "
rm $base_dir/pantalla_$subbanda.dat
}
############################## ##############################
# MAIN #
############################## ##############################
# $1 initial slope
# $2 coefficient
# $3 next coefficient...
# CLEANUP
#rm *.dat
#echo "======================================== ARCHIVO DE PESOS ===================================" > pesos_$2.dat
rm -rf tmps; mkdir tmps;
# CALLS
# H1 H2 H3 H4 L
#slopesVariables_aTRL $1 $2 $3 $4 $5
#slopesVariables_aTRL 65535 65535 65535 65535 65535
comentario(){
RMSE_anterior=0
kbps_anterior=0
subbanda=constantes
slopesVariables_aTRL 45000 45000 45000 45000
slopesVariables_aTRL 44900 44900 44900 44900
slopesVariables_aTRL 44800 44800 44800 44800
slopesVariables_aTRL 44700 44700 44700 44700
slopesVariables_aTRL 44600 44600 44600 44600
slopesVariables_aTRL 44500 44500 44500 44500
slopesVariables_aTRL 44400 44400 44400 44400
slopesVariables_aTRL 44300 44300 44300 44300
slopesVariables_aTRL 44200 44200 44200 44200
slopesVariables_aTRL 44100 44100 44100 44100
slopesVariables_aTRL 44000 44000 44000 44000
slopesVariables_aTRL 43900 43900 43900 43900
slopesVariables_aTRL 43800 43800 43800 43800
slopesVariables_aTRL 43700 43700 43700 43700
slopesVariables_aTRL 43600 43600 43600 43600
slopesVariables_aTRL 43500 43500 43500 43500
slopesVariables_aTRL 43400 43400 43400 43400
slopesVariables_aTRL 43300 43300 43300 43300
slopesVariables_aTRL 43200 43200 43200 43200
slopesVariables_aTRL 43100 43100 43100 43100
slopesVariables_aTRL 43000 43000 43000 43000
slopesVariables_aTRL 42900 42900 42900 42900
slopesVariables_aTRL 42800 42800 42800 42800
slopesVariables_aTRL 42700 42700 42700 42700
slopesVariables_aTRL 42600 42600 42600 42600
slopesVariables_aTRL 42500 42500 42500 42500
slopesVariables_aTRL 42400 42400 42400 42400
slopesVariables_aTRL 42300 42300 42300 42300
slopesVariables_aTRL 42200 42200 42200 42200
slopesVariables_aTRL 42100 42100 42100 42100
slopesVariables_aTRL 42000 42000 42000 42000
slopesVariables_aTRL 41900 41900 41900 41900
slopesVariables_aTRL 41800 41800 41800 41800
slopesVariables_aTRL 41700 41700 41700 41700
slopesVariables_aTRL 41600 41600 41600 41600
slopesVariables_aTRL 41500 41500 41500 41500
slopesVariables_aTRL 41400 41400 41400 41400
slopesVariables_aTRL 41300 41300 41300 41300
slopesVariables_aTRL 41200 41200 41200 41200
slopesVariables_aTRL 41100 41100 41100 41100
slopesVariables_aTRL 41000 41000 41000 41000
slopesVariables_aTRL 40900 40900 40900 40900
slopesVariables_aTRL 40800 40800 40800 40800
slopesVariables_aTRL 40700 40700 40700 40700
slopesVariables_aTRL 40600 40600 40600 40600
slopesVariables_aTRL 40500 40500 40500 40500
slopesVariables_aTRL 40400 40400 40400 40400
slopesVariables_aTRL 40300 40300 40300 40300
slopesVariables_aTRL 40200 40200 40200 40200
slopesVariables_aTRL 40100 40100 40100 40100
slopesVariables_aTRL 40000 40000 40000 40000
RMSE_anterior=0
kbps_anterior=0
subbanda=L
slopesVariables_aTRL 65535 65535 65535 45000
slopesVariables_aTRL 65535 65535 65535 44900
slopesVariables_aTRL 65535 65535 65535 44800
slopesVariables_aTRL 65535 65535 65535 44700
slopesVariables_aTRL 65535 65535 65535 44600
slopesVariables_aTRL 65535 65535 65535 44500
slopesVariables_aTRL 65535 65535 65535 44400
slopesVariables_aTRL 65535 65535 65535 44300
slopesVariables_aTRL 65535 65535 65535 44200
slopesVariables_aTRL 65535 65535 65535 44100
slopesVariables_aTRL 65535 65535 65535 44000
slopesVariables_aTRL 65535 65535 65535 43900
slopesVariables_aTRL 65535 65535 65535 43800
slopesVariables_aTRL 65535 65535 65535 43700
slopesVariables_aTRL 65535 65535 65535 43600
slopesVariables_aTRL 65535 65535 65535 43500
slopesVariables_aTRL 65535 65535 65535 43400
slopesVariables_aTRL 65535 65535 65535 43300
slopesVariables_aTRL 65535 65535 65535 43200
slopesVariables_aTRL 65535 65535 65535 43100
slopesVariables_aTRL 65535 65535 65535 43000
slopesVariables_aTRL 65535 65535 65535 42900
slopesVariables_aTRL 65535 65535 65535 42800
slopesVariables_aTRL 65535 65535 65535 42700
slopesVariables_aTRL 65535 65535 65535 42600
slopesVariables_aTRL 65535 65535 65535 42500
slopesVariables_aTRL 65535 65535 65535 42400
slopesVariables_aTRL 65535 65535 65535 42300
slopesVariables_aTRL 65535 65535 65535 42200
slopesVariables_aTRL 65535 65535 65535 42100
slopesVariables_aTRL 65535 65535 65535 42000
slopesVariables_aTRL 65535 65535 65535 41900
slopesVariables_aTRL 65535 65535 65535 41800
slopesVariables_aTRL 65535 65535 65535 41700
slopesVariables_aTRL 65535 65535 65535 41600
slopesVariables_aTRL 65535 65535 65535 41500
slopesVariables_aTRL 65535 65535 65535 41400
slopesVariables_aTRL 65535 65535 65535 41300
slopesVariables_aTRL 65535 65535 65535 41200
slopesVariables_aTRL 65535 65535 65535 41100
slopesVariables_aTRL 65535 65535 65535 41000
slopesVariables_aTRL 65535 65535 65535 40900
slopesVariables_aTRL 65535 65535 65535 40800
slopesVariables_aTRL 65535 65535 65535 40700
slopesVariables_aTRL 65535 65535 65535 40600
slopesVariables_aTRL 65535 65535 65535 40500
slopesVariables_aTRL 65535 65535 65535 40400
slopesVariables_aTRL 65535 65535 65535 40300
slopesVariables_aTRL 65535 65535 65535 40200
slopesVariables_aTRL 65535 65535 65535 40100
slopesVariables_aTRL 65535 65535 65535 40000
}
iteraciones=2
divisores=(0.999 0.998 0.997 0.996 0.995 0.994 0.993 0.992 0.991 0.99)
decrementores=(0.0005 0.001 0.002)
#slopes_L=(45000 44800 44600 44300 44000 43700 43400 43100 42800 42500 42200 41800 41400 41000)
#slopes_L=(45000) # 1 # 2 (# codestream # iteraciones)
#slopes_L=(44800) # 2 # 2
slopes_L=(44600) # 3 # 2
#slopes_L=(44300) # 4 # 2
#slopes_L=(44000) # 5 # 2
#slopes_L=(43700) # 6 # 2
#slopes_L=(43400) # 7 # 3
#slopes_L=(43100) # 8 # 3
#slopes_L=(42800) # 9 # 4
#slopes_L=(42500) # 10 # 4
#slopes_L=(42200) # 11 # 5
#slopes_L=(41800) # 12 # 5
#slopes_L=(41400) # 13 # 6
#slopes_L=(41000) # 14 # 6
for L in "${slopes_L[@]}"; do
# The same divisor for every subband.
for divisor in "${divisores[@]}"; do
RMSE_anterior=0
kbps_anterior=0
subbanda=$L'_'$divisor
H3=`echo "$L/$divisor" | bc`
H2=`echo "$H3/$divisor" | bc`
H1=`echo "$H2/$divisor" | bc`
for (( i=0; i <= $iteraciones; i++ )) ; do
slopesVariables_aTRL $H1 $H2 $H3 $L
H3=`echo "$H3/$divisor" | bc`
H2=`echo "$H2/$divisor" | bc`
H1=`echo "$H1/$divisor" | bc`
done
done
# A different divisor for each subband.
for divisor in "${divisores[@]}"; do
for decrementor in "${decrementores[@]}"; do
RMSE_anterior=0
kbps_anterior=0
subbanda=$L'_'$divisor'_'$decrementor
divi1=`echo "$divisor-$decrementor" | bc -l`
divi2=`echo "$divi1-$decrementor" | bc -l`
divi3=`echo "$divi2-$decrementor" | bc -l`
H3=`echo "$L/$divi1" | bc`
H2=`echo "$H3/$divi2" | bc`
H1=`echo "$H2/$divi3" | bc`
for (( i=0; i <= $iteraciones; i++ )) ; do
slopesVariables_aTRL $H1 $H2 $H3 $L
H3=`echo "$H3/$divi1" | bc`
H2=`echo "$H2/$divi2" | bc`
H1=`echo "$H1/$divi3" | bc`
done
done
done
done
#set +x
|
vicente-gonzalez-ruiz/QSVC
|
trunk/tests/Control_BR_slopes/C_crew_cif/slopesTRL_3.sh
|
Shell
|
gpl-2.0
| 15,066 |
#!/bin/bash
#SBATCH --job-name=getMGRAST
#SBATCH --partition=compute
#SBATCH --time=84:00:00
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --output=getMGRAST_%j.out
#SBATCH --error=getMGRAST_%j.err
key=$1
mgid=$2
echo "$1 for $2"
srun curl -H "auth: $key" -H 'Accept-Encoding: gzip,deflate' "http://api.metagenomics.anl.gov/1/annotation/similarity/$mgid?source=SEED&type=organism&identity=60&length=15" -o "$mgid.seed"
srun curl -H "auth: $key" -H 'Accept-Encoding: gzip,deflate' "http://api.metagenomics.anl.gov/1/annotation/similarity/$mgid?source=SEED&type=function&identity=60&length=15" -o "$mgid.fseed"
srun curl -H "auth: $key" -H 'Accept-Encoding: gzip,deflate' "http://api.metagenomics.anl.gov/1/annotation/similarity/$mgid?source=KO&type=ontology&identity=60&length=15" -o "$mgid.ko"
|
Askarbek-orakov/ASAR
|
bash/getMGRAST.sh
|
Shell
|
gpl-3.0
| 806 |
#!/bin/bash
# RSS watcher for downloading new music
# test n2
# date of the last update (set this before calling RSS_START)
LAST_UPDATE=""
RSS_START ()
{
    rsstail \
        --newer "$LAST_UPDATE" \
        --format
}
# check whether rsstail is installed
if command -v rsstail > /dev/null 2>&1; then
    RSS_START
else
    echo "Please install rsstail to continue"
fi
|
thiago2roshi/my-dotfiles
|
bin/rss_update2.sh
|
Shell
|
gpl-3.0
| 329 |
#!/bin/sh
#
# Script to grant privileges to the bacula database
#
db_user=${db_user:-bacula}
pg_config=`which pg_config`
bindir=`pg_config --bindir`
PATH="$bindir:$PATH"
db_name=${db_name:-bacula}
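# Feed the GRANT statements to psql via a here-document; any extra command
# line arguments ($*) are passed straight through to psql.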
psql -q -f - -d ${db_name} $* <<END-OF-DATA
SET client_min_messages=WARNING;
-- Grants for the database
ALTER DATABASE ${db_name} OWNER TO ${db_user} ;
-- Grants for tables
GRANT ALL ON webacula_client_acl TO ${db_user};
GRANT ALL ON webacula_command_acl TO ${db_user};
GRANT ALL ON webacula_dt_commands TO ${db_user};
GRANT ALL ON webacula_dt_resources TO ${db_user};
GRANT ALL ON webacula_fileset_acl TO ${db_user};
GRANT ALL ON webacula_job_acl TO ${db_user};
GRANT ALL ON webacula_job_size TO ${db_user};
GRANT ALL ON webacula_jobdesc TO ${db_user};
GRANT ALL ON webacula_logbook TO ${db_user};
GRANT ALL ON webacula_logtype TO ${db_user};
GRANT ALL ON webacula_php_session TO ${db_user};
GRANT ALL ON webacula_pool_acl TO ${db_user};
GRANT ALL ON webacula_resources TO ${db_user};
GRANT ALL ON webacula_roles TO ${db_user};
GRANT ALL ON webacula_storage_acl TO ${db_user};
GRANT ALL ON webacula_tmp_tablelist TO ${db_user};
GRANT ALL ON webacula_users TO ${db_user};
GRANT ALL ON webacula_version TO ${db_user};
GRANT ALL ON webacula_where_acl TO ${db_user};
-- Grants for sequences on those tables
GRANT SELECT, UPDATE ON webacula_client_acl_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_command_acl_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_dt_commands_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_dt_resources_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_fileset_acl_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_job_acl_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_jobdesc_desc_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_logbook_logid_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_logtype_typeid_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_pool_acl_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_resources_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_roles_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_storage_acl_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_tmp_tablelist_tmpid_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_users_id_seq TO ${db_user};
GRANT SELECT, UPDATE ON webacula_where_acl_id_seq TO ${db_user};
-- Grants for functions
GRANT EXECUTE ON FUNCTION webacula_clone_file(vTbl TEXT, vFileId INT, vPathId INT, vFilenameId INT, vLStat TEXT, vMD5 TEXT, visMarked INT, vFileSize INT) TO ${db_user};
GRANT EXECUTE ON FUNCTION elt(pos int4, str VARIADIC text[]) TO ${db_user};
GRANT EXECUTE ON FUNCTION base64_decode_lstat(pos int4, str varchar) TO ${db_user};
GRANT EXECUTE ON FUNCTION human_size(bytes numeric) TO ${db_user};
END-OF-DATA
if [ $? -eq 0 ]
then
echo "PostgreSQL: Privileges for user ${db_user} granted successfully on database ${db_name}."
else
echo "PostgreSQL: Privileges for user ${db_user} granted failed on database ${db_name}!"
exit 1
fi
exit 0
|
wanderleihuttel/webacula
|
install/PostgreSql/30_grant_postgresql_privileges.sh
|
Shell
|
gpl-3.0
| 3,156 |
#!/bin/bash
# License GPL, see LICENSE
# Written by Nanomad [condellog_At_gmail_dot_com]
if gnome-terminal -e ./zenity_check;
then zenity --info --title "Ubuntu Parental Control setup" --text $"This will install and configure a basic parental control feature for Ubuntu" && gksudo -t "Ubuntu Parental Control setup" 'gnome-terminal -e ./.setup';
else exit 1;
fi
|
KIAaze/bin_and_dotfiles_public
|
bins/public_bin/Parental_control/install.sh
|
Shell
|
gpl-3.0
| 363 |
#!/bin/bash
#BSUB -J "pindel[1-573]"
#BSUB -q long
#BSUB -M 8000
#BSUB -n 8
#BSUB -R "select[mem>8000] rusage[mem=8000] span[ptile=8]"
#BSUB -e pindel.err.%I
#BSUB -o pindel.out.%I
cd /lustre/scratch116/casm/cgp/users/tn5/katarina/
TUMOUR=`ls bams/filtered/*.bam | head -n $LSB_JOBINDEX | tail -n 1`
NORMAL=bams/control/PD21369b.bam
SAMPLE=`basename $TUMOUR | sed 's/.bam//'`
REF=/lustre/scratch112/sanger/cgppipe/canpipe/live/ref/human/GRCh37d5
PINDEL="/software/CGP/canpipe/live/bin/canpipe_live pindel.pl -reference $REF/genome.fa -exclude NC_007605,hs37d5,GL% -simrep $REF/pindel/simpleRepeats.bed.gz -badloci $REF/caveman/flagging/hi_seq_depth.bed.gz -genes $REF/vagrent/e75/codingexon_regions.indel.bed.gz -unmatched $REF/pindel/pindel_np.gff3.gz -assembly GRCh37d5 -species Human -seqtype WGS -filter $REF/pindel/genomicRules.lst -softfil $REF/pindel/softRules.lst -tumour $TUMOUR -normal $NORMAL -outdir pindel/$SAMPLE -cpus 8"
mkdir pindel/$SAMPLE
$PINDEL
|
TravisCG/SI_scripts
|
pindel.sh
|
Shell
|
gpl-3.0
| 971 |
#!/usr/bin/env bash
# Copyright: Christoph Dittmann <[email protected]>
# License: GNU GPL, version 3 or later; http://www.gnu.org/copyleft/gpl.html
#
# Hiragana trainer.
DIRECTORY="$(dirname "$0")"
KANA_FILE="$DIRECTORY/hiragana.txt"
IRC_COMMAND='!hira'
# shellcheck source=kana/kana.sh
. "$(dirname "$0")/kana.sh"
|
Christoph-D/Japanese-Tools
|
kana/hira.sh
|
Shell
|
gpl-3.0
| 322 |
#!/bin/bash
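# Derive the display number from the ~/.vnc/<host>:<display>.log filename
# (assumes a single VNC session/log file).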
VNCDISPLAY=$(ls $HOME/.vnc/*.log | sed -e "s/[^:]*://" -e "s/\.log$//")
vncserver -kill :$VNCDISPLAY
|
jcrodriguez-dis/vpl-xmlrpc-jail
|
vpl_vnc_stopper.sh
|
Shell
|
gpl-3.0
| 113 |
sudo chmod ugo+r /var/lib/command-not-found/commands.db
|
Erotemic/local
|
scripts/fix_command_not_found.sh
|
Shell
|
gpl-3.0
| 56 |
#! /bin/sh
#
# Copyright 2012 Andrew Gottemoller.
#
# This software is a copyrighted work licensed under the terms of the
# Secure Rsync license. Please consult the file "SR_LICENSE" for
# details.
gmake --directory=secure_rsync -r -R setup
|
agottem/secure_rsync
|
setup.sh
|
Shell
|
gpl-3.0
| 244 |
#!/bin/bash
# E-WPS - The easiest way to crack WPS
# Copyright (C) 2015-2016
# Script by Leprechaun
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# E-Mail: [email protected]
# PGP: https://pgp.mit.edu/pks/lookup?op=get&search=0x8FF24099181CE01E
# -------------------------------------------------------------------------------
# Check whether the user is root
if [ "$(id -u)" != "0" ]; then
tput setaf 1; tput bold; echo -e '\nQuesto script va avviato come root\n' 2>&1
exit 1
fi
# Clear the terminal and show the logo
clear
tput setaf 3; tput bold;
echo ' ___________ __ ____________ _________'
echo ' \_ _____/ / \ / \ \/ _____/'
echo ' | __)_ ______ \ \/\/ /| ___/\_____ \'
echo ' | \ /_____/ \ / | | / \'
echo ' /_______ / \__/\ / |____| /_______ /'
echo ' \/ \/ \/'
tput setaf 1; tput bold;
echo -e "\n The easiest way to crack WPS"
# Script start
tput setaf 4; tput bold;
echo -e "\n\nVisualizzo gli adattatori di rete WiFi..."
tput setaf 7; tput bold;
ifconfig | grep "wl" -A 1
tput setaf 4; tput bold;
echo -e "\nQuale wlan vuoi utilizzare? (default wlan0)"
tput setaf 7; tput bold;
read wadapter
if [ -z "$wadapter" ]; then
wadapter=wlan0
fi
clear
# Check that it is not already in monitor mode
statowlan=$(iwconfig $wadapter |grep -o "Mode:Monitor")
if [ -z "$statowlan" ]; then
tput setaf 2; tput bold;
echo "Interfaccia down..."
ip link set $wadapter down || { tput setaf 1; tput bold; echo -e "\nErrore: non riesco a disattivare l'adattatore di rete" ; exit 1; }
sleep 2
echo "iwconfig monitor..."
iwconfig $wadapter mode monitor || { tput setaf 1; tput bold; echo -e "\nErrore: non riesco ad inizializzare la modalità monitor" ; exit 1; }
sleep 2
echo "Random MAC..."
echo "DISABILITATO PER PROBLEMI"
echo "Skippo..."
#tput setaf 7; tput bold;
#macchanger -r $wadapter || { tput setaf 1; tput bold; echo -e "\nErrore: non riesco a impostare un MAC casuale" ; exit 1; }
#sleep 2
tput setaf 2; tput bold;
echo "Interfaccia up..."
ip link set $wadapter up || { tput setaf 1; tput bold; echo -e "\nErrore: non riesco a riattivare l'adattatore di rete" ; exit 1; }
sleep 2
else
# Ask whether to change the MAC of an interface already in monitor mode
tput setaf 2; tput bold;
echo -e "\nL'adattore è già in modalità monitor, vuoi cambiare MAC?"
echo -e "\nc+enter per cambiare - enter per continuare con il MAC attuale"
tput setaf 7; tput bold;
read cambiomac
case $cambiomac in
c)
tput setaf 2; tput bold;
echo "Interfaccia down..."
ip link set $wadapter down || { tput setaf 1; tput bold; echo -e "\nErrore: non riesco a disattivare l'adattatore di rete" ; exit 1; }
sleep 2
echo "Random MAC..."
tput setaf 7; tput bold;
macchanger -r $wadapter || { tput setaf 1; tput bold; echo -e "\nErrore: non riesco a impostare un MAC casuale" ; exit 1; }
sleep 2
tput setaf 2; tput bold;
echo "Interfaccia up..."
ip link set $wadapter up || { tput setaf 1; tput bold; echo -e "\nErrore: non riesco a riattivare l'adattatore di rete" ; exit 1; }
sleep 2
;;
*)
clear
tput setaf 2; tput bold;
echo -e "\nContinuiamo..."
;;
esac
sleep 2
fi
# Show the current network settings
clear
tput setaf 4; tput bold;
echo -e "\nVisualizzo le impostazioni correnti:"
tput setaf 7; tput bold;
macchanger -s $wadapter |grep "Current MAC:"
echo "--------------------"
iwconfig $wadapter |grep "Mode:"
tput setaf 4; tput bold;
echo -e "\n~Terminato~"
sleep 2
# Check whether the working folder exists, otherwise create it
if [ -d "CrackedWifi" ]; then
tput setaf 2; tput bold;
echo -e "\nCartella di lavoro presente, proseguo..."
else
tput setaf 1; tput bold;
echo -e "\nCreo la cartella di lavoro..."
mkdir "CrackedWifi"
fi
# Cracking loop
while true
do
# Set a trap
int_trap() {
tput setaf 4; tput bold; echo -e "\nCtrl-C premuto"
}
trap int_trap INT
# Start Wash
tput setaf 7; tput bold;
wash -i $wadapter
# Ask for the reaver parameters
tput setaf 1; tput bold;
echo -e "\nMAC da attaccare:"
tput setaf 7;
read bssid
tput setaf 1; tput bold;
echo "Canale:"
tput setaf 7;
read chn
clear
# Check that the network has not already been cracked
if [ -f CrackedWifi/${bssid} ]; then
tput setaf 1; tput bold;
echo -e "\nEsiste già un file legato a questa rete all'interno della directory di lavoro..."
else
# Start reaver
reaver -i $wadapter -b $bssid -c $chn -K 1 -vv |tee CrackedWifi/${bssid} && echo "Cracking completo" || { echo "Qualcosa è andato storto..." ; rm -f CrackedWifi/${bssid} ; }
fi
# Ask the user whether to quit, to work around the trap
tput setaf 4; tput bold;
echo -e "\nenter per continuare - e+enter per uscire"
tput setaf 7;
read scelta
case $scelta in
e)
exit
;;
*)
clear
tput setaf 2; tput bold;
echo -e "\nContinuiamo..."
;;
esac
# Restart the loop
done
|
Leproide/E-WPS
|
e-wps-nomacchange.sh
|
Shell
|
gpl-3.0
| 5,560 |
#!/bin/bash
set -ue
APP_NAME="withnicome"
SCRIPT_PATH="$(dirname "$(readlink -f "${0}")")/"
error_exit() {
printf "%s\n" "${*}" >&2
exit 1
}
# Check next version.
currentVer="$(sed -ne 's/.*[, \t]\+"version":\ \?"\([0-9.]\+\)",\?/\1/p' "${SCRIPT_PATH}manifest.json")"
printf "CURRENT_VERSION: %s\n" "${currentVer}"
# Use arg to next version
if test "${#}" -ne "0"; then
nextVer="${1}"
else
printf "NEXT_VERSION>>"
read nextVer
fi
# If the input is blank, maintain the version number.
if test "${nextVer}" == ""; then
nextVer="${currentVer}"
printf "Version remains: %s\n" "${nextVer}" >&2
fi
# Update version number
printf "%s\n" "${nextVer}" |
grep -e '^[0-9]\+\(\.[0-9]\+\)*$' >/dev/null 2>&1 ||
error_exit "invalid version format"
# Compare as strings so these may be decimals.
if test "${nextVer}" != "${currentVer}"; then
sed -i "s/\(.*\"version\":\ \?\"\)[0-9.]\+\(\",\?\)/\1${nextVer}\2/" "${SCRIPT_PATH}manifest.json"
fi
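# e.g. (illustrative) this rewrites "version": "1.2" to "version": "1.3"
# in manifest.json while leaving the rest of the line untouched.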
# Build
outDir="${SCRIPT_PATH}build/"
mkdir -p "${outDir}"
cd "${SCRIPT_PATH}" && zip -r "${outDir}${APP_NAME}-${nextVer}.xpi" icons/ content_script/ manifest.json
printf "build: %s\n" "${outDir}${APP_NAME}-${nextVer}.xpi" >&2
|
choco-la/nicomment
|
package.sh
|
Shell
|
gpl-3.0
| 1,201 |
#!/bin/bash
cd $HOME
# setup vagrant user as an admin of the system
bash /var/www/elmsln/scripts/install/users/elmsln-admin-user.sh
# refresh file so its good when next call uses it
source $HOME/.bashrc
# add vagrant to the elmsln group
sudo usermod -a -G elmsln vagrant
# set all permissions correctly and for vagrant user
sudo bash /var/www/elmsln/scripts/utilities/harden-security.sh vagrant
# disable varnish which the Cent 6.x image enables by default
# this way when we're doing local development we don't get cached anything
# port swap to not use varnish in local dev
sudo sed -i 's/Listen 8080/Listen 80/g' /etc/httpd/conf/httpd.conf
sudo sed -i 's/8080/80/g' /etc/httpd/conf.d/*.conf
sudo service varnish stop
sudo /etc/init.d/httpd restart
sudo /etc/init.d/mysqld restart
# disable varnish from starting automatically on reboot
sudo chkconfig varnish off
# Install front end stack in case users wish to develop with sass.
#curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.32.0/install.sh | bash
#source .bashrc
#nvm install 6.7
#cd /var/www/elmsln/core/dslmcode/shared/drupal-7.x/themes/elmsln_contrib/foundation_access/app
#npm update --save-dev
#npm install gulp --save-dev
#npm install -g bower
#npm install
#bower install
|
mmilutinovic1313/elmsln
|
scripts/vagrant/cleanup.sh
|
Shell
|
gpl-3.0
| 1,251 |
source ./scripts/dev/environment.sh
source ./scripts/dev/virtualize.sh
|
layuplist/layup-list
|
init.sh
|
Shell
|
gpl-3.0
| 70 |
#!/usr/bin/env bash
#set -xv
#####################################################################
#
# Name: Luke Collins
# Date: 19/03/2018
# Script Description: Script to enable cloudflare
# Version: 1.0
# Copyright: Copyright (C) 2017, 2018 Luke Collins - All Rights Reserved
#
# #########################################################
#
# Version Date Modifier Change
# 1.0 17/03/2018 Luke Initial release
#
###########################################################################
cd /opt/cloudflare
java -jar cloudflareClient.jar enable aoifebradley.net
java -jar cloudflareClient.jar enable rccars.ie
java -jar cloudflareClient.jar enable www.rccars.ie
java -jar cloudflareClient.jar enable forum.rccars.ie
java -jar cloudflareClient.jar enable castleknockcomputing.com
java -jar cloudflareClient.jar enable www.castleknockcomputing.com
java -jar cloudflareClient.jar enable orcaireland.ie
java -jar cloudflareClient.jar enable www.orcaireland.ie
java -jar cloudflareClient.jar enable api.orcaireland.ie
java -jar cloudflareClient.jar enable forum.orcaireland.ie
java -jar cloudflareClient.jar enable events.orcaireland.ie
java -jar cloudflareClient.jar enable orcaireland.com
java -jar cloudflareClient.jar enable www.orcaireland.com
java -jar cloudflareClient.jar enable forum.orcaireland.com
java -jar cloudflareClient.jar enable lukecollins.net
java -jar cloudflareClient.jar enable www.lukecollins.net
java -jar cloudflareClient.jar enable blog.lukecollins.net
java -jar cloudflareClient.jar enable simplybeautifyme.com
java -jar cloudflareClient.jar enable www.simplybeautifyme.com
java -jar cloudflareClient.jar enable twitter.simplybeautifyme.com
java -jar cloudflareClient.jar enable twitter.simplybeautify.me
echo "Done"
systemctl start httpd
|
LukeCollins-net/Ops-Tools
|
cloud/vps1/scripts/cloudflare/enableCF.sh
|
Shell
|
gpl-3.0
| 1,974 |
#!/bin/bash
echoerr() { echo "$@" 1>&2; }
typ="$1"
entity="$2"
options="$3"
folder="$4"
path="$5"
[ -z "$typ" ] && echo "No class provided (e.g. Shop)"
[ -z "$entity" ] && echo "No entity type provided (node, way or relation)"
[ -z "$folder" ] && folder=`date +%F`
[ -z "$path" ] && path="../target/dump/"
targetDir="$path/$folder/"
mkdir -p "$targetDir"
#echoerr "Settings: $options | $folder | $path"
uri="http://linkedgeodata.org/ontology/$typ"
file="$folder-$typ.$entity.sorted.nt.bz2"
tempfile=`tempfile`
"./create-queries-${entity}s.sh" "$uri" | while read line; do
# echo "sparqlify-tool $options -Q '$line' | sort -u -S 1024M | rapper -i ntriples - http://example.org/ | pbzip2 -c > '$targetDir/$file'"
echo "sparqlify-tool $options -Q '$line' >> '$tempfile'"
done
echo "cat $tempfile | sort -u -S 1024M | rapper -i ntriples - http://example.org/ | pbzip2 -c > '$targetDir/$file'"
echo "rm '$tempfile'"
|
GeoKnow/LinkedGeoData
|
linkedgeodata-dump/bin/create-script-entity-type-tool.sh
|
Shell
|
gpl-3.0
| 928 |
EXP_PFILE=/home/michal/Dropbox/study/university/final_year/final-year-project/src/exp_params.txt
DEF_PFILE=/home/michal/Dropbox/study/university/final_year/final-year-project/src/params.txt
INDIR=/media/michal/Edison/fyp/new/morerandom/morerandom_params
STREAM_DIR=/media/michal/Edison/fyp/new/morerandom/morerandom_streams
GOODNESS_LOC=/media/michal/Edison/fyp/new/morerandom/
LAUNCHER_LOC=/home/michal/Dropbox/study/university/final_year/final-year-project/src/deltastream
OUTDIR=/media/michal/Edison/fyp/new/morerandom/morerandom_time_delay
FOLDER_PREFIX=alpha_
FUNC_PREFIX=f
BESTFILE=$OUTDIR/best_params.txt
NSTREAMS=2
NFUNCS=5
ALPHA_VALUES=(01 02 03) #(04 05 06 07 08 09 10 11 12 13 14 15)
while true; do
echo -e "Running this script will edit some parameters in the files $EXP_PFILE and $DEF_PFILE.\nResults will be output to $OUTDIR, stream data will be read from $STREAM_DIR,\nbest parameter information will be read from $INDIR.\nAre you sure you want to run it?"
read yn
case $yn in
[Yy]* ) break;;
[Nn]* ) exit;;
* ) echo "Please enter Y or N.";;
esac
done
echo "" > $BESTFILE
# Get the names of each experiment from the experiment paramfile so we can check that they
# are set up to estimate the time delay
NPARAM="`grep experiment\_names $EXP_PFILE`"
PSTR=(${NPARAM// / })
ENAMES=(${PSTR[1]//,/ })
for V in ${ENAMES[@]}; do
# Change the experiment type to do experiments on the delay and not just functions.
sed -i "s/$V\_type [a-zA-Z]*/$V\_type delay/" $EXP_PFILE
done
# Time delay experiments are not to be run stuttered, so change that value too.
sed -i 's/run_stuttered [a-zA-Z]*/run_stuttered no/' $EXP_PFILE
for TD_EST_TYPE in pmf area; do
# Set the delta estimation method to the estimator type that we are using.
sed -i "s/delta_est_method [a-zA-Z]*/delta_est_method $TD_EST_TYPE/" $DEF_PFILE
for ALPHA in ${ALPHA_VALUES[@]}; do
for FUNC in $(seq 1 $NFUNCS); do
for TYPE in ${ENAMES[@]}; do
if [ ! -d "$OUTDIR/$TD_EST_TYPE/$FOLDER_PREFIX$ALPHA/$FUNC_PREFIX$FUNC" ]; then
mkdir -p $OUTDIR/$TD_EST_TYPE/$FOLDER_PREFIX$ALPHA/$FUNC_PREFIX$FUNC/$TYPE
fi
echo "alpha=$ALPHA, func=$FUNC, type=$TYPE, td_type=$TD_EST_TYPE" >> $BESTFILE
# Extract the goodness and experiment number from the aggregate file
RES="`cat $GOODNESS_LOC/agg_goodness_$TYPE.txt | grep -A 1 "alpha_$ALPHA func$FUNC" | sed '1d;'`"
GOODNESS_ARR=(${RES// / }) # split the above string on a space to get an array - exp num is at index 1
EXP_NUM=${GOODNESS_ARR[1]}
# Grep the parameters for the experiment type out of the experiment file
T_PARAM=`grep $TYPE\_params $EXP_PFILE`
# Split the whole parameter string on a space - there is one separating the parameter from the values
PARAM_STRING=(${T_PARAM// / })
# Split the first array location on commas, which are used to separate parameters
VALUES=(${PARAM_STRING[1]//,/ })
# Loop over each parameter name that is in the experiment
for VALUE in ${VALUES[@]}; do
# Get the value of the parameter in the best experiment for this alpha, func and type
BEST_P=`grep $VALUE $INDIR/$FOLDER_PREFIX$ALPHA/$FUNC_PREFIX$FUNC/$TYPE/experiment_$EXP_NUM/parameters.txt`
echo $BEST_P >> $BESTFILE
# Get the point in the experiment file at which the parameter declaration for the current parameter is found.
EXP_LINE=`grep -n "$VALUE " $EXP_PFILE`
# Split to get line number in index 0
EXP_L_SPLIT=(${EXP_LINE//:/ })
EXP_LINE=${EXP_L_SPLIT[0]}
# Replace the relevant line in the experiment parameter file to match the best parameter for this set
sed -i "$EXP_LINE s/.*/$BEST_P/" $EXP_PFILE
done
done
echo -e "\n" >> $BESTFILE
# At this point, we have an experiment parameter file modified to match the best parameters in the baseline and gaussian
# experiments on stuttered streams. We can now run the experiments with the best parameters.
$LAUNCHER_LOC -x $EXP_PFILE -p $DEF_PFILE -i $STREAM_DIR/$FOLDER_PREFIX$ALPHA/$FUNC_PREFIX$FUNC -o $OUTDIR/$TD_EST_TYPE/$FOLDER_PREFIX$ALPHA/$FUNC_PREFIX$FUNC -c $NFUNCS -n $NSTREAMS -t 3 -r
done
done
done
|
heuristicus/final-year-project
|
scripts/randfunc/runtd_exp.sh
|
Shell
|
gpl-3.0
| 4,177 |
#!/bin/bash
set -ex
CHROME_DOWNLOAD_PATH=~/Downloads/
if [ "$1" == "-r" ]; then
rm ${CHROME_DOWNLOAD_PATH}/LF*.html 2> /dev/null
else
echo -n "Copying html ... "
rm -rf ./html/ ./md/
mkdir -p ./html/{orig,clean}
mv ${CHROME_DOWNLOAD_PATH}/LF*.html html/orig #2> /dev/null
# rm html/clean/* 2> /dev/null
# rm -f md/* 2> /dev/null
cp html/orig/* html/clean/
rename 's/_popup.html$/_popup (0).html/' html/clean/*.html || true
echo "ok"
fi
echo -n "Cleaning html ... "
python clean.py
echo "ok"
echo -n "Creating result ... "
python join.py
echo "ok"
echo -n "Backing up original htmls ... "
mkdir -p out/htmls/
cp -pr html/orig/* out/htmls/
echo "ok"
|
kanner/lfs-crawler
|
run.sh
|
Shell
|
gpl-3.0
| 660 |
#!/bin/bash
#if [ -f /etc/apf/factory.conf ] ; then
# cp -f /etc/apf/factory.conf /etc/apf/factory.conf.bak
#fi
if id apf > /dev/null 2>&1; then
: # do nothing
else
/usr/sbin/useradd --comment "AutoPyFactory service account" --shell /bin/bash apf
fi
|
edquist/autopyfactory
|
misc/rpm-pre.sh
|
Shell
|
gpl-3.0
| 256 |
#!/bin/bash
#
# Unattended/SemiAutomated OpenStack Installer
# Danny j. Pérez M. perezdann at gmail dot com
# Based on 1.0.5 ubuntu16lts by Reynaldo R. Martinez P. TigerLinux at gmail dot com
#
# Main Installer Script
# Version: 1.0.6.deb8 "Daenerys"
# July 09, 2016
#
# OpenStack MITAKA for Debian 8
PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
#
# First, we source our config file and verify that some important processes
# have already completed.
#
if [ -f ./configs/main-config.rc ]
then
source ./configs/main-config.rc
mkdir -p /etc/openstack-control-script-config
else
echo "Can't access my config file. Aborting !"
echo ""
exit 0
fi
if [ -f /etc/openstack-control-script-config/db-installed ]
then
echo ""
echo "DB Proccess OK. Let's continue"
echo ""
else
echo ""
echo "DB Proccess not completed. Aborting !"
echo ""
exit 0
fi
if [ -f /etc/openstack-control-script-config/keystone-installed ]
then
echo ""
echo "Keystone Proccess OK. Let's continue"
echo ""
else
echo ""
echo "Keystone Proccess not completed. Aborting !"
echo ""
exit 0
fi
if [ -f /etc/openstack-control-script-config/heat-installed ]
then
echo ""
echo "This module was already installed. Exiting !"
echo ""
exit 0
fi
echo ""
echo "Installing HEAT Packages"
#
# We proceed to install HEAT Packages non-interactively
#
export DEBIAN_FRONTEND=noninteractive
DEBIAN_FRONTEND=noninteractive aptitude -y install heat-api heat-api-cfn heat-engine python-heatclient
DEBIAN_FRONTEND=noninteractive aptitude -y install heat-cfntools
DEBIAN_FRONTEND=noninteractive aptitude -y install python-zaqarclient python-manilaclient python-mistralclient
echo "Done"
echo ""
source $keystone_admin_rc_file
echo ""
echo "Configuring Heat"
echo ""
#
# We silently stop the heat services
#
stop heat-api >/dev/null 2>&1
stop heat-api-cfn >/dev/null 2>&1
stop heat-engine >/dev/null 2>&1
systemctl stop heat-api >/dev/null 2>&1
systemctl stop heat-api-cfn >/dev/null 2>&1
systemctl stop heat-engine >/dev/null 2>&1
#
# By using python based tools, we proceed to configure heat.
#
chown -R heat.heat /etc/heat
echo "# Heat Main Config" >> /etc/heat/heat.conf
case $dbflavor in
"mysql")
crudini --set /etc/heat/heat.conf database connection mysql+pymysql://$heatdbuser:$heatdbpass@$dbbackendhost:$mysqldbport/$heatdbname
;;
"postgres")
crudini --set /etc/heat/heat.conf database connection postgresql+psycopg2://$heatdbuser:$heatdbpass@$dbbackendhost:$psqldbport/$heatdbname
;;
esac
crudini --set /etc/heat/heat.conf database retry_interval 10
crudini --set /etc/heat/heat.conf database idle_timeout 3600
crudini --set /etc/heat/heat.conf database min_pool_size 1
crudini --set /etc/heat/heat.conf database max_pool_size 10
crudini --set /etc/heat/heat.conf database max_retries 100
crudini --set /etc/heat/heat.conf database pool_timeout 10
crudini --set /etc/heat/heat.conf database backend heat.db.sqlalchemy.api
crudini --set /etc/heat/heat.conf DEFAULT host $heathost
crudini --set /etc/heat/heat.conf DEFAULT debug false
crudini --set /etc/heat/heat.conf DEFAULT verbose false
crudini --set /etc/heat/heat.conf DEFAULT log_dir /var/log/heat
crudini --set /etc/heat/heat.conf DEFAULT heat_metadata_server_url http://$heathost:8000
crudini --set /etc/heat/heat.conf DEFAULT heat_waitcondition_server_url http://$heathost:8000/v1/waitcondition
crudini --set /etc/heat/heat.conf DEFAULT heat_watch_server_url http://$heathost:8003
crudini --set /etc/heat/heat.conf DEFAULT heat_stack_user_role $heat_stack_user_role
crudini --set /etc/heat/heat.conf DEFAULT use_syslog False
crudini --set /etc/heat/heat.conf heat_api_cloudwatch bind_host 0.0.0.0
crudini --set /etc/heat/heat.conf heat_api_cloudwatch bind_port 8003
crudini --set /etc/heat/heat.conf heat_api bind_host 0.0.0.0
crudini --set /etc/heat/heat.conf heat_api bind_port 8004
#
# Keystone Authentication
#
crudini --set /etc/heat/heat.conf keystone_authtoken project_name $keystoneservicestenant
crudini --set /etc/heat/heat.conf keystone_authtoken username $heatuser
crudini --set /etc/heat/heat.conf keystone_authtoken password $heatpass
# crudini --set /etc/heat/heat.conf keystone_authtoken auth_uri http://$keystonehost:5000
crudini --set /etc/heat/heat.conf keystone_authtoken auth_url http://$keystonehost:35357
crudini --set /etc/heat/heat.conf keystone_authtoken project_domain_name $keystonedomain
crudini --set /etc/heat/heat.conf keystone_authtoken user_domain_name $keystonedomain
crudini --set /etc/heat/heat.conf keystone_authtoken signing_dir /tmp/keystone-signing-heat
# crudini --set /etc/heat/heat.conf keystone_authtoken auth_version v3
crudini --set /etc/heat/heat.conf keystone_authtoken auth_type password
# crudini --set /etc/heat/heat.conf keystone_authtoken auth_section keystone_authtoken
# crudini --set /etc/heat/heat.conf keystone_authtoken memcached_servers $keystonehost:11211
#
# crudini --set /etc/heat/heat.conf keystone_authtoken identity_uri http://$keystonehost:35357
# crudini --set /etc/heat/heat.conf keystone_authtoken admin_tenant_name $keystoneservicestenant
# crudini --set /etc/heat/heat.conf keystone_authtoken admin_user $heatuser
# crudini --set /etc/heat/heat.conf keystone_authtoken admin_password $heatpass
#
crudini --del /etc/heat/heat.conf keystone_authtoken auth_uri
crudini --del /etc/heat/heat.conf keystone_authtoken auth_version
crudini --del /etc/heat/heat.conf keystone_authtoken auth_section
crudini --del /etc/heat/heat.conf keystone_authtoken memcached_servers
crudini --del /etc/heat/heat.conf keystone_authtoken identity_uri
crudini --del /etc/heat/heat.conf keystone_authtoken admin_tenant_name
crudini --del /etc/heat/heat.conf keystone_authtoken admin_user
crudini --del /etc/heat/heat.conf keystone_authtoken admin_password
#
crudini --del /etc/heat/heat.conf keystone_authtoken auth_host
crudini --del /etc/heat/heat.conf keystone_authtoken auth_port
crudini --del /etc/heat/heat.conf keystone_authtoken auth_protocol
#
# crudini --set /etc/heat/heat.conf trustee project_name $keystoneservicestenant
crudini --set /etc/heat/heat.conf trustee username $heatuser
crudini --set /etc/heat/heat.conf trustee password $heatpass
# crudini --set /etc/heat/heat.conf trustee auth_uri http://$keystonehost:5000
crudini --set /etc/heat/heat.conf trustee auth_url http://$keystonehost:35357
crudini --set /etc/heat/heat.conf trustee project_domain_name $keystonedomain
crudini --set /etc/heat/heat.conf trustee user_domain_name $keystonedomain
# crudini --set /etc/heat/heat.conf trustee signing_dir /tmp/keystone-signing-heat
# crudini --set /etc/heat/heat.conf trustee auth_version v3
crudini --set /etc/heat/heat.conf trustee auth_plugin password
#
# crudini --set /etc/heat/heat.conf trustee identity_uri http://$keystonehost:35357
# crudini --set /etc/heat/heat.conf trustee admin_tenant_name $keystoneservicestenant
# crudini --set /etc/heat/heat.conf trustee admin_user $heatuser
# crudini --set /etc/heat/heat.conf trustee admin_password $heatpass
#
crudini --del /etc/heat/heat.conf trustee project_name
crudini --del /etc/heat/heat.conf trustee auth_uri
crudini --del /etc/heat/heat.conf trustee signing_dir
crudini --del /etc/heat/heat.conf trustee auth_version
crudini --del /etc/heat/heat.conf trustee identity_uri
crudini --del /etc/heat/heat.conf trustee admin_tenant_name
crudini --del /etc/heat/heat.conf trustee admin_user
crudini --del /etc/heat/heat.conf trustee admin_password
#
crudini --set /etc/heat/heat.conf clients_keystone auth_uri http://$keystonehost:35357
crudini --set /etc/heat/heat.conf ec2authtoken auth_uri http://$keystonehost:5000/v2.0/ec2tokens
crudini --set /etc/heat/heat.conf clients_heat url "http://$heathost:8004/v1/%(tenant_id)s"
#
# End of Keystone Auth Section
#
crudini --set /etc/heat/heat.conf DEFAULT control_exchange openstack
case $brokerflavor in
"qpid")
crudini --set /etc/heat/heat.conf DEFAULT rpc_backend qpid
crudini --set /etc/heat/heat.conf oslo_messaging_qpid qpid_hostname $messagebrokerhost
crudini --set /etc/heat/heat.conf oslo_messaging_qpid qpid_port 5672
crudini --set /etc/heat/heat.conf oslo_messaging_qpid qpid_username $brokeruser
crudini --set /etc/heat/heat.conf oslo_messaging_qpid qpid_password $brokerpass
crudini --set /etc/heat/heat.conf oslo_messaging_qpid qpid_heartbeat 60
crudini --set /etc/heat/heat.conf oslo_messaging_qpid qpid_protocol tcp
crudini --set /etc/heat/heat.conf oslo_messaging_qpid qpid_tcp_nodelay True
;;
"rabbitmq")
crudini --set /etc/heat/heat.conf DEFAULT rpc_backend rabbit
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_host $messagebrokerhost
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_password $brokerpass
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_userid $brokeruser
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_port 5672
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_use_ssl false
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_virtual_host $brokervhost
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_max_retries 0
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_retry_interval 1
crudini --set /etc/heat/heat.conf oslo_messaging_rabbit rabbit_ha_queues false
;;
esac
crudini --set /etc/heat/heat.conf DEFAULT stack_domain_admin $stack_domain_admin
crudini --set /etc/heat/heat.conf DEFAULT stack_domain_admin_password $stack_domain_admin_password
crudini --set /etc/heat/heat.conf DEFAULT stack_user_domain_name $stack_user_domain_name
if [ $ceilometerinstall == "yes" ]
then
crudini --set /etc/heat/heat.conf oslo_messaging_notifications driver messagingv2
fi
echo ""
echo "Heat Configured"
echo ""
#
# We proceed to provision/update HEAT Database
#
rm -f /var/lib/heat/heat.sqlite
echo ""
echo "Provisioning HEAT Database"
echo ""
chown -R heat.heat /var/log/heat /etc/heat
su -s /bin/sh -c "heat-manage db_sync" heat
chown -R heat.heat /var/log/heat /etc/heat
echo ""
echo "Done"
echo ""
#
# We proceed to apply IPTABLES rules and start/enable Heat services
#
echo ""
echo "Applying IPTABLES rules"
iptables -A INPUT -p tcp -m multiport --dports 8000,8004 -j ACCEPT
/etc/init.d/netfilter-persistent save
echo "Done"
echo ""
echo "Cleaning UP App logs"
for mylog in `ls /var/log/heat/*.log`; do echo "" > $mylog;done
echo "Done"
echo ""
echo ""
echo "Starting Services"
echo ""
systemctl start heat-api
systemctl start heat-api-cfn
systemctl start heat-engine
systemctl enable heat-api
systemctl enable heat-api-cfn
systemctl enable heat-engine
#
# Finally, we verify that HEAT was properly installed. If not, we stop further proceedings.
#
testheat=`dpkg -l heat-api 2>/dev/null|tail -n 1|grep -ci ^ii`
if [ $testheat == "0" ]
then
echo ""
echo "HEAT Installatio FAILED. Aborting !"
echo ""
exit 0
else
date > /etc/openstack-control-script-config/heat-installed
date > /etc/openstack-control-script-config/heat
fi
echo ""
echo "Heat Installed and Configured"
echo ""
|
dannyperez/openstack-mitaka-autoinstaller-debian8
|
modules/heatinstall.sh
|
Shell
|
gpl-3.0
| 11,036 |
# build script for Wikispeech
# mimic travis build tests, always run before pushing!
set -e
SLEEP=60
if [ $# -ne 0 ]; then
echo "For developers: If you are developing for Wikispeech, and need to make changes to this repository, make sure you run a test build using build_and_test.sh before you make a pull request. Don't run more than one instance of this script at once, and make sure no pronlex server is already running on the default port."
exit 0
fi
basedir=`dirname $0`
basedir=`realpath $basedir`
echo $basedir
cd $basedir
mkdir -p .build
#go test -v ./...
#gosec ./...
#staticcheck ./...
mkdir -p .build/appdir
for proc in `ps --sort pid -Af|egrep pronlex| egrep -v "grep .E"|sed 's/ */\t/g'|cut -f2`; do
kill $proc || echo "Couldn't kill $proc"
done
bash scripts/setup.sh -a .build/appdir -e sqlite
bash scripts/start_server.sh -a .build/appdir -e sqlite &
export pid=$!
echo "pronlex server started on pid $pid. wait for $SLEEP seconds before shutting down"
sleep $SLEEP
sh .travis/exit_server_and_fail_if_not_running.sh pronlex $pid
|
stts-se/pronlex
|
build_and_test.sh
|
Shell
|
gpl-3.0
| 1,072 |
#!/bin/bash
# script to resubmit next job after current job completed
# Andre R. Erler, 28/02/2013
# The following environment variables have to be set by the caller:
# INIDIR, RSTDIR, WRFSCRIPT, RESUBJOB, NEXTSTEP, NOWPS
# set default for $NOWPS and $RSTCNT, to avoid problems when passing variable to next job
NOWPS=${NOWPS:-'WPS'} # i.e. launch WPS, unless instructed otherwise
RSTCNT=${RSTCNT:-0} # assume no restart by default
# $NEXTSTEP is handled below
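# Example environment as a calling job script might set it (all values are
# illustrative assumptions):
#   export INIDIR=/scratch/wrf/exp01 RSTDIR=$INIDIR/wrfrst WRFSCRIPT=run_WRF.pbs
#   export RESUBJOB='qsub run_WRF.pbs' NEXTSTEP=1980-01-02 NOWPS=WPS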
## launch WRF for next step (if $NEXTSTEP is not empty)
if [[ -n "${NEXTSTEP}" ]]
then
# read date string for restart file
RSTDATE=$(sed -n "/${NEXTSTEP}/ s/${NEXTSTEP}[[:space:]]\+'\([-_\:0-9]\{19\}\)'[[:space:]]\+'[-_\:0-9]\{19\}'$/\1/p" stepfile)
# N.B.: '[[:space:]]' also matches tabs; '\ ' only matches one space; '\+' means one or more
# some code to catch sed errors on TCS
if [[ -z "${RSTDATE}" ]]
then
echo ' ### ERROR: cannot read step file - aborting! ### '
# print some diagnostics
echo
echo 'Current PATH variable:'
echo "${PATH}"
echo
echo 'sed executable:'
which sed
echo
echo 'Stepfile line:'
grep "${NEXTSTEP}" stepfile
echo
echo 'stepfile stat:'
stat stepfile
echo
exit 1
fi # RSTDATE
# while [[ -z "${RSTDATE}" ]]
# do # loop appears to be necessary to prevent random read errors on TCS
# echo ' Error: could not read stepfile - trying again!'
# RSTDATE=$(sed -n "/${NEXTSTEP}/ s/${NEXTSTEP}[[:space:]].\(.*\).[[:space:]].*$/\1/p" stepfile)
# sleep 600 # prevent too much file access
# done
NEXTDIR="${INIDIR}/${NEXTSTEP}" # next $WORKDIR
cd "${NEXTDIR}"
# link restart files
echo
echo "Linking restart files to next working directory:"
echo "${NEXTDIR}"
for RESTART in "${RSTDIR}"/wrfrst_d??_${RSTDATE//:/[_:]}; do # match hh:mm:ss and hh_mm_ss
ln -sf "${RESTART}"; done
# check for WRF input files (in next working directory)
# N.B.: this option can potentially waste a lot of walltime and should be used with caution
if [[ "${WAITFORWPS}" == 'WAIT' ]] && [[ ! -f "${WPSSCRIPT}" ]]
then
echo
echo " *** Waiting for WPS to complete... ***"
echo
while [[ ! -f "${WPSSCRIPT}" ]]; do
sleep 30 # need faster turnover to submit next step
done
fi # $WAITFORWPS
# go back to initial directory
cd "${INIDIR}"
# now, decide what to do...
if [[ -f "${NEXTDIR}/${WPSSCRIPT}" ]]
then
if [ 0 -lt $(grep -c 'SUCCESS COMPLETE REAL_EM INIT' "${NEXTDIR}/real/rsl.error.0000") ]
then
# submit next job (start next cycle)
echo
echo " *** Launching WRF for next step: ${NEXTSTEP} *** "
echo
## waiting to allow filesystem to update
#echo
#echo " --- Waiting 30 min. to allow file system to update --- "
#echo
#sleep 1800 # wait 30 min.
# execute submission command (set in setup-script; machine-specific)
#eval "echo ${RESUBJOB}" # print command; now done with set -x
set -x
eval "${RESUBJOB}" # execute command
ERR=$? # capture exit status
set +x
exit $ERR # exit with exit status from reSubJob
else # WPS crashed
# do not continue
echo
echo " ### WPS for next step (${NEXTSTEP}) failed --- aborting! ### "
echo
exit 1
fi # if WPS successful
else # WPS not finished (yet)
# start a sleeper job, if available
if [[ -n "{SLEEPERJOB}" ]]
then
# submit next job (start next cycle)
echo
echo " --- WPS for next step (${NEXTSTEP}) has not finished yet --- "
echo " +++ Launching sleeper job to restart WRF when WPS finished +++ "
echo " (see log file below for details and job status) "
echo
# submit sleeper script (set in setup-script; machine-specific)
set -x
eval "${SLEEPERJOB}" # execute command
ERR=$? # capture exit status
set +x
exit $ERR # exit with exit status from the sleeper job submission
else # WPS did not run - abort
# do not continue
echo
echo " ### WPS for next step (${NEXTSTEP}) failed --- aborting! ### "
echo
exit 1
fi # if sleeper job
fi # if WPS finished...
else
echo
echo ' === No $NEXTSTEP --- cycle terminated. === '
echo ' (no more jobs have been submitted) '
echo
exit 0 # most likely this is OK
fi # $NEXTSTEP
|
aerler/WRF-Tools
|
Scripts/Common/resubJob.sh
|
Shell
|
gpl-3.0
| 4,964 |
#!/bin/bash
BUTTERFLY_BUILD_ROOT=$1
BUTTERFLY_SRC_ROOT=$(cd "$(dirname $0)/../.." && pwd)
source $BUTTERFLY_SRC_ROOT/tests/functions.sh
network_connect 0 1
server_start 0
nic_add 0 1 42 sg-1
nic_add 0 2 42 sg-1
qemus_start 1 2
for i in {1..10}; do
ssh_no_connection_test udp 1 2 6000
ssh_no_connection_test udp 2 1 6000
sg_rule_add_port_open udp 0 6000 sg-1
ssh_connection_test udp 1 2 6000
ssh_connection_test udp 2 1 6000
sg_del 0 sg-1
ssh_no_connection_test tcp 1 2 6000
ssh_no_connection_test tcp 2 1 6000
sg_rule_add_port_open tcp 0 6000 sg-1
ssh_connection_test tcp 1 2 6000
ssh_connection_test tcp 2 1 6000
sg_del 0 sg-1
done
qemu_stop 1 2
server_stop 0
network_disconnect 0 1
return_result
|
outscale-jju/butterfly
|
tests/scenario_25/test.sh
|
Shell
|
gpl-3.0
| 753 |
#extract graphic characters
wget http://www.unicode.org/Public/5.1.0/ucdxml/ucd.all.flat.zip
unzip -p ucd.all.flat.zip | python3 ../parse.py > characters.html
wc -l characters.html
#100507
|
taroyabuki/onepage-unicode-chars
|
5.1/stage1.sh
|
Shell
|
gpl-3.0
| 192 |
#!/usr/bin/env bash
#set -o nounset #exit if an unset variable is used
set -o errexit #exit on any single command fail
# find voltdb binaries in either installation or distribution directory.
if [ -n "$(which voltdb 2> /dev/null)" ]; then
VOLTDB_BIN=$(dirname "$(which voltdb)")
else
VOLTDB_BIN="$(dirname $(dirname $(pwd)))/bin"
echo "The VoltDB scripts are not in your PATH."
echo "For ease of use, add the VoltDB bin directory: "
echo
echo $VOLTDB_BIN
echo
echo "to your PATH."
echo
fi
# move voltdb commands into path for this script
PATH=$VOLTDB_BIN:$PATH
# installation layout has all libraries in $VOLTDB_ROOT/lib/voltdb
if [ -d "$VOLTDB_BIN/../lib/voltdb" ]; then
VOLTDB_BASE=$(dirname "$VOLTDB_BIN")
VOLTDB_LIB="$VOLTDB_BASE/lib/voltdb"
VOLTDB_VOLTDB="$VOLTDB_LIB"
# distribution layout has libraries in separate lib and voltdb directories
else
VOLTDB_BASE=$(dirname "$VOLTDB_BIN")
VOLTDB_LIB="$VOLTDB_BASE/lib"
VOLTDB_VOLTDB="$VOLTDB_BASE/voltdb"
fi
APPCLASSPATH=$CLASSPATH:$({ \
\ls -1 "$VOLTDB_VOLTDB"/voltdb-*.jar; \
\ls -1 "$VOLTDB_LIB"/*.jar; \
\ls -1 "$VOLTDB_LIB"/kafka*.jar; \
\ls -1 "$VOLTDB_LIB"/extension/*.jar; \
} 2> /dev/null | paste -sd ':' - )
CLIENTCLASSPATH=client.jar:$CLASSPATH:$({ \
\ls -1 "$VOLTDB_VOLTDB"/voltdbclient-*.jar; \
\ls -1 "$VOLTDB_LIB"/kafka*.jar; \
\ls -1 "$VOLTDB_LIB"/slf4j-api-1.6.2.jar; \
} 2> /dev/null | paste -sd ':' - )
# LOG4J="$VOLTDB_VOLTDB/log4j.xml"
LICENSE="$VOLTDB_VOLTDB/license.xml"
HOST="localhost"
# remove binaries, logs, runtime artifacts, etc... but keep the jars
function clean() {
rm -rf debugoutput voltdbroot log catalog-report.html \
statement-plans build/*.class clientbuild/*.class
}
# remove everything from "clean" as well as the jarfiles
function cleanall() {
ant clean
}
# compile the source code for procedures and the client into jarfiles
function jars() {
ant all
cp formatter.jar $VOLTDB_BASE/bundles
}
# compile the procedure and client jarfiles if they don't exist
function jars-ifneeded() {
rm -rf felix-cache
if [ ! -e sp.jar ] || [ ! -e client.jar ]; then
jars;
fi
}
# run the voltdb server locally
# note -- use something like this to create the Kafka topic, name
# matching the name used in the deployment file:
# /home/opt/kafka/bin/kafka-topics.sh --zookeeper kafka2:2181 --topic A7_KAFKAEXPORTTABLE2 --partitions 2 --replication-factor 1 --create
function server() {
jars-ifneeded
echo "Starting the VoltDB server."
echo "Remember -- the Kafka topic must exist before launching this test."
echo "To perform this action manually, use the command line: "
echo
echo "voltdb create -d deployment.xml -l $LICENSE -H $HOST"
echo
voltdb create -d deployment.xml -l $LICENSE -H $HOST
}
#kafka importer
function kafka() {
jars-ifneeded
echo "Starting the VoltDB server."
echo "To perform this action manually, use the command line: "
echo
echo "voltdb create -d deployment-kafka.xml -l $LICENSE -H $HOST"
echo
voltdb create -d deployment-kafka.xml -l $LICENSE -H $HOST
}
# load schema and procedures
function init() {
jars-ifneeded
sqlcmd < ddl.sql
}
# wait for backgrounded server to start up
function wait_for_startup() {
until sqlcmd --query=' exec @SystemInformation, OVERVIEW;' > /dev/null 2>&1
do
sleep 2
echo " ... Waiting for VoltDB to start"
if [[ $SECONDS -gt 60 ]]
then
echo "Exiting. VoltDB did not startup within 60 seconds" 1>&2; exit 1;
fi
done
}
# startup server in background and load schema
function background_server_andload() {
jars-ifneeded
# run the server in the background
voltdb create -B -d deployment.xml -l $LICENSE -H $HOST > nohup.log 2>&1 &
wait_for_startup
init
}
# run the client that drives the example
function client() {
async-benchmark
}
# Asynchronous benchmark sample
# Use this target for argument help
function async-benchmark-help() {
jars-ifneeded
java -classpath $CLIENTCLASSPATH kafkaimporter.client.kafkaimporter.KafkaImportBenchmark --help
}
# latencyreport: default is OFF
# ratelimit: must be a reasonable value if latencyreport is ON
# Disable the comments to get latency report
function async-benchmark() {
jars-ifneeded
java -classpath $CLIENTCLASSPATH \
client.kafkaimporter.KafkaImportBenchmark \
--displayinterval=5 \
--duration=180 \
--kafkaserverlist=localhost:9092 \
--alltypes=false \
--useexport=false \
--expected_rows=6000000 \
--servers=localhost
}
# The following two demo functions are used by the Docker package. Don't remove.
# compile the jars for procs and client code
function demo-compile() {
jars
}
function demo() {
echo "starting server in background..."
background_server_andload
echo "starting client..."
client
echo
echo When you are done with the demo database, \
remember to use \"voltadmin shutdown\" to stop \
the server process.
}
function help() {
echo "Usage: ./run.sh {clean|server|init|demo|client|async-benchmark|aysnc-benchmark-help}"
}
# Run the target passed as the first arg on the command line
# If no first arg, run server
if [ $# -gt 1 ]; then help; exit; fi
if [ $# = 1 ]; then $1; else server; fi
|
migue/voltdb
|
tests/test_apps/kafkaimporter/run.sh
|
Shell
|
agpl-3.0
| 5,420 |
#!/usr/bin/env sh
# Get the tag
if [ "$#" -gt 0 ] ; then
export TAG="$1"
else
export TAG="latest"
fi
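# Example (illustrative tag): build and tag the image as mrsarm/django-coleman:1.2
#   ./docker-build.sh 1.2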
# Get the build
if [ -n "$GITHUB_SHA" ] ; then
GIT_HASH=${GITHUB_SHA}
GIT_BRANCH=${GITHUB_REF#refs/heads/}
else
GIT_HASH=$(git rev-parse HEAD)
GIT_BRANCH=$(git symbolic-ref --short HEAD)
fi
GIT_HASH_SHORT=$(git rev-parse --short "$GIT_HASH")
export BUILD=${GIT_BRANCH}.${GIT_HASH_SHORT}
echo "Building mrsarm/django-coleman:${TAG} with image_build $BUILD ..."
#docker-compose build
docker build --build-arg=BUILD="$BUILD" -t mrsarm/django-coleman:${TAG} .
|
mrsarm/django-coleman
|
docker-build.sh
|
Shell
|
agpl-3.0
| 584 |
#!/usr/bin/env bash
#
# publish.sh
#
# Command that sends your blog/website to the public area
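# DESTDIR and PUBLISH_DESTINATION must be set by the caller; a hypothetical
# invocation could look like:
#   DESTDIR=./output PUBLISH_DESTINATION=user@host:/var/www/blog ./tools/publish.sh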
rsync -a --partial --delete ${DESTDIR}/ ${PUBLISH_DESTINATION}/
|
blankoworld/makefly
|
tools/publish.sh
|
Shell
|
agpl-3.0
| 159 |
#!/bin/sh
### BEGIN INIT INFO
# Provides : Ait-Mlouk Addi ([email protected])
# site web : http://www.aitmlouk-addi.info/
# blog : http://aitmlouk-addi.blogspot.com/
# linkedin : http://ma.linkedin.com/pub/addi-ait-mlouk/56/850/32a
# viadeo : http://ma.viadeo.com/en/profile/addi.mlk
# Google+ : https://plus.google.com/+AitMloukAddi
# Gmail : [email protected]
### END INIT INFO
#beginning of script
#select all databases
#archive all databases by this name (date_hour)
#
DBNAME=`psql -Uopenerp -tq -dtemplate1 -c "select datname from pg_database"`
hours=$(date +%H-%M)
for DB in $DBNAME
do
BACKUPFILE=/opt/backup/$DB\_$hours\_`/bin/date +%D |sed 's;/;-;g'`
if [ "$DB" != "template0" ] && [ "$DB" != "template1" ]; then
/usr/bin/vacuumdb --analyze -Uopenerp $DB
/usr/bin/pg_dump -Uopenerp --column-inserts $DB | gzip -c > $BACKUPFILE.out.gz
fi
done
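# To restore one of these dumps later (illustrative file name; assumes the
# target database already exists):
#   gunzip -c /opt/backup/mydb_10-30_01-02-24.out.gz | psql -Uopenerp mydb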
#End of script
|
aitmlouk/odoo-scripts
|
backup.sh
|
Shell
|
agpl-3.0
| 895 |
#!/usr/bin/env bash
# Kill the apt services holding a dpkg lock, so that the ansible-bootstrap
# script can run without conflicts.
# NOTE: this is a temporary fix. Instead, we should be doing what SRE does,
# and first run the security+common roles on a vanilla AMI, which will disable
# unattended-updates and set up users. Then we can feel free to run the
# ansible bootstrap without any problems.
set -xe
if grep -q 'Focal Fossa' /etc/os-release; then
systemctl stop apt-daily.service
systemctl kill --kill-who=all apt-daily.service
# Our jenkins job for building AMIs will timeout, even if the lock is
# never released.
while lsof |grep -q /var/lib/dpkg/lock; do
echo "Waiting for apt to release the dpkg lock..."
sleep 5
done
fi
|
edx/configuration
|
util/packer/stop-automatic-updates.sh
|
Shell
|
agpl-3.0
| 779 |
#!/bin/bash
# ------------------ Process Commandline Options -------------------
USAGE="Usage: "`basename $0`" [-u uid][-p][-h help]"
USERNAME=`whoami`
PASSWD=''
COURSE_SUBSTR=''
DB_NAME='Extracts'
needPasswd=false
TABLE_NAME=''
ALL_COLS=''
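# Example (hypothetical user name): connect as MySQL user 'dataadmin' and
# prompt for the password:
#   ./makeCourseNameListTable.sh -u dataadmin -p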
# Execute getopt
ARGS=`getopt -o "u:ph" -l "user:,password,help" \
-n "getopt.sh" -- "$@"`
#Bad arguments
if [ $? -ne 0 ];
then
exit 1
fi
# A little magic
eval set -- "$ARGS"
# Now go through all the options
while true;
do
case "$1" in
-u|--user)
shift
# Grab the option value:
if [ -n "$1" ]
then
USERNAME=$1
shift
fi;;
-p|--password)
needPasswd=true
shift;;
-h|--help)
echo $USAGE
exit 0
;;
--)
shift
break;;
esac
done
if $needPasswd
then
# The -s option suppresses echo:
read -s -p "Password for user '$USERNAME' on `hostname`'s MySQL server: " PASSWD
echo
else
# Get home directory of whichever user will
# log into MySQL:
HOME_DIR=$(getent passwd $USERNAME | cut -d: -f6)
# If the home dir has a readable file called mysql in its .ssh
# subdir, then pull the pwd from there:
if test -f $HOME_DIR/.ssh/mysql && test -r $HOME_DIR/.ssh/mysql
then
PASSWD=`cat $HOME_DIR/.ssh/mysql`
fi
fi
if [ -z $PASSWD ]
then
MYSQL_AUTH="-u $USERNAME"
else
MYSQL_AUTH="-u $USERNAME -p$PASSWD"
fi
mysql $MYSQL_AUTH -e "USE Edx; DROP TABLE IF EXISTS AllCourseDisplayNames;"
mysql $MYSQL_AUTH -e "USE Edx; CREATE TABLE AllCourseDisplayNames \
(course_display_name varchar(255) NOT NULL PRIMARY KEY) \
(SELECT DISTINCT course_display_name \
FROM EventXtract) \
UNION \
(SELECT DISTINCT course_display_name \
FROM ActivityGrade) \
;"
|
EDUlib/eTracesX
|
Translation_software/edx_to_MOOCdb_piping/import.openedx.apipe/scripts/makeCourseNameListTable.sh
|
Shell
|
agpl-3.0
| 1,901 |
#!/bin/bash
#
# This file is part of SerialPundit.
#
# Copyright (C) 2014-2021, Rishi Gupta. All rights reserved.
#
# The SerialPundit is DUAL LICENSED. It is made available under the terms of the GNU Affero
# General Public License (AGPL) v3.0 for non-commercial use and under the terms of a commercial
# license for commercial use of this software.
#
# The SerialPundit is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#################################################################################################
# This script makes it possible to change the latency timer's value from user space application.
# It change the permissions of latency timer file (sysfs entry) to read/write for all. Setting low
# timer value may be beneficial for frequent I/O while setting high values may be beneficial for bulk
# transfer.
# The default drivers in Linux kernel may not allow to change (ignore) timer value or the sysfs file
# itself may not exist. So it is required that either we need to write our own driver for this purpose
# or use drivers provided by FTDI at their website.
# An example sysfs file for latency timer is :
# /sys/devices/pci0000:00/0000:00:14.0/usb3/3-3/3-3:1.0/ttyUSB0/tty/ttyUSB0/device/latency_timer
# To see what are the environment variables set by udev redirect 'env' value and open spudevenv.txt
# file in text editor to see list of variables and their values.
# env >> /tmp/spudevenv.txt
# Input argument ($1) to this script is devpath for the device (udev rule %p).
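# A matching udev rule could look like the following (the match keys are an
# assumption; adjust them for the actual driver and distribution):
#   ACTION=="add", SUBSYSTEM=="usb-serial", DRIVER=="ftdi_sio", RUN+="/path/to/udev-ftdi-latency-timer.sh %p"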
chmod 0666 "/sys$1/device/latency_timer"
|
RishiGupta12/SerialPundit
|
tools-and-utilities/udev-ftdi-latency-timer.sh
|
Shell
|
agpl-3.0
| 1,677 |
#!/bin/sh
cd doc
make
|
mattam82/Coq-Equations
|
makedoc.sh
|
Shell
|
lgpl-2.1
| 23 |
#!/usr/bin/env bash
#
# Copyright (C) 2018 Gaëtan Harter <[email protected]>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
#
#
# Central test script to have sanity checks for the build system
# It is run unconditionally on all files.
#
#
: "${RIOTBASE:="$(cd "$(dirname "$0")/../../../" || exit; pwd)"}"
SCRIPT_PATH=dist/tools/buildsystem_sanity_check/check.sh
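# Usage: the script takes no arguments and is typically run from the
# repository root:
#   ./dist/tools/buildsystem_sanity_check/check.sh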
tab_indent() {
# Indent using a 'bashism' to get a tab that is compatible with 'bsd-sed'
sed 's/^/\'$'\t/'
}
prepend() {
# 'i' needs 'i\{newline}' and a newline after for 'bsd-sed'
sed '1i\
'"$1"'
'
}
error_with_message() {
tab_indent | prepend "${1}"
}
# Modules should not check the content of FEATURES_PROVIDED/_REQUIRED/OPTIONAL
# Handling specific behaviors/dependencies should by checking the content of:
# * `USEMODULE`
# * maybe `FEATURES_USED` if it is not a module (== not a periph_)
check_not_parsing_features() {
local patterns=()
local pathspec=()
patterns+=(-e 'if.*filter.*FEATURES_PROVIDED')
patterns+=(-e 'if.*filter.*FEATURES_REQUIRED')
patterns+=(-e 'if.*filter.*FEATURES_OPTIONAL')
# A pathspec with an exclude must start with an inclusive pathspec in git 2.7.4
pathspec+=('*')
# Ignore this file when matching as it self matches
pathspec+=(":!${SCRIPT_PATH}")
# These two files contain sanity checks using FEATURES_ so are allowed
pathspec+=(':!Makefile.include' ':!makefiles/info-global.inc.mk')
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message 'Modules should not check the content of FEATURES_PROVIDED/_REQUIRED/OPTIONAL'
}
# Some variables do not need to be exported and even cause issues when being
# exported because they are evaluated even when not needed.
#
# Currently this blacklists exported variables instead of whitelisting or
# providing a mechanism for handling it.
# It just keep things not exported anymore in the future.
UNEXPORTED_VARIABLES=()
UNEXPORTED_VARIABLES+=('FLASHFILE')
UNEXPORTED_VARIABLES+=('TERMPROG' 'TERMFLAGS')
UNEXPORTED_VARIABLES+=('FLASHER' 'FFLAGS')
UNEXPORTED_VARIABLES+=('RESET' 'RESETFLAGS')
UNEXPORTED_VARIABLES+=('DEBUGGER' 'DEBUGGER_FLAGS')
UNEXPORTED_VARIABLES+=('DEBUGSERVER' 'DEBUGSERVER_FLAGS')
UNEXPORTED_VARIABLES+=('PREFLASHER' 'PREFFLAGS' 'FLASHDEPS')
UNEXPORTED_VARIABLES+=('DEBUG_ADAPTER' 'DEBUG_ADAPTER_ID')
UNEXPORTED_VARIABLES+=('PROGRAMMER_SERIAL')
UNEXPORTED_VARIABLES+=('STLINK_VERSION')
UNEXPORTED_VARIABLES+=('PORT_LINUX' 'PORT_DARWIN')
UNEXPORTED_VARIABLES+=('PORT[ ?=:]' 'PORT$')
EXPORTED_VARIABLES_ONLY_IN_VARS=()
EXPORTED_VARIABLES_ONLY_IN_VARS+=('CPU_ARCH')
EXPORTED_VARIABLES_ONLY_IN_VARS+=('CPU_FAM')
check_not_exporting_variables() {
local patterns=()
local pathspec=()
for variable in "${UNEXPORTED_VARIABLES[@]}"; do
patterns+=(-e "export[[:blank:]]\+${variable}")
done
git -C "${RIOTBASE}" grep "${patterns[@]}" \
| error_with_message 'Variables must not be exported:'
# Some variables may still be exported in 'makefiles/vars.inc.mk' as the
# only place that should export common variables
pathspec+=('*')
pathspec+=(':!makefiles/vars.inc.mk')
patterns=()
for variable in "${EXPORTED_VARIABLES_ONLY_IN_VARS[@]}"; do
patterns+=(-e "export[[:blank:]]\+${variable}")
done
# Only run if there are patterns, otherwise it matches everything
if [ ${#patterns[@]} -ne 0 ]; then
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message 'Variables must only be exported in `makefiles/vars.inc.mk`:'
fi
}
# Deprecated variables or patterns
# Prevent deprecated variables or patterns to re-appear after cleanup
check_deprecated_vars_patterns() {
local patterns=()
local pathspec=()
patterns+=(-e 'FEATURES_MCU_GROUP')
patterns+=(-e 'TEST_ON_CI_WHITELIST += all')
# A pathspec with an exclude must start with an inclusive pathspec in git 2.7.4
pathspec+=('*')
# Ignore this file when matching as it self matches
pathspec+=(":!${SCRIPT_PATH}")
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message 'Deprecated variables or patterns:'
}
# Makefile files from the cpu must not be included by the board anymore
# They are included by the main Makefile.include/Makefile.features/Makefile.dep
check_board_do_not_include_cpu_features_dep() {
local patterns=()
local pathspec=()
# shellcheck disable=SC2016
# Single quotes are used to not expand expressions
patterns+=(-e 'include $(RIOTCPU)/.*/Makefile\..*')
pathspec+=('boards/')
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message 'Makefile files from the cpu must not be included by the board anymore'
}
# CPU and CPU_MODEL definition have been moved to 'BOARD|CPU/Makefile.features'
check_cpu_cpu_model_defined_in_makefile_features() {
local patterns=()
local pathspec=()
# With or without space and with or without ?=
patterns+=(-e '^ *\(export\)\? *CPU \??\?=')
patterns+=(-e '^ *\(export\)\? *CPU_MODEL \??\?=')
pathspec+=(':!boards/**/Makefile.features')
pathspec+=(':!cpu/**/Makefile.features')
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message 'CPU and CPU_MODEL definition must be done by board/BOARD/Makefile.features, board/common/**/Makefile.features or cpu/CPU/Makefile.features'
}
# Applications Makefile must not set 'BOARD =' unconditionally
check_not_setting_board_equal() {
local patterns=()
local pathspec=()
patterns+=(-e '^[[:space:]]*BOARD[[:space:]]*=')
pathspec+=('**/Makefile')
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message 'Applications Makefile should use "BOARD ?="'
}
# Examples must not provide BOARD_INSUFFICIENT_MEMORY in Makefile, but in
# Makefile.ci
check_board_insufficient_memory_not_in_makefile() {
local patterns=()
local pathspec=()
patterns+=(-e '^[[:space:]]*BOARD_INSUFFICIENT_MEMORY[[:space:]:+]*=')
pathspec+=('**/Makefile')
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message 'Move BOARD_INSUFFICIENT_MEMORY to Makefile.ci'
}
# Test applications must not define the APPLICATION variable
checks_tests_application_not_defined_in_makefile() {
local patterns=()
local pathspec=()
patterns+=(-e '^[[:space:]]*APPLICATION[[:space:]:+]=')
pathspec+=('tests/**/Makefile')
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message "Don't define APPLICATION in test Makefile"
}
# Develhelp should not be set via CFLAGS
checks_develhelp_not_defined_via_cflags() {
local patterns=()
local pathspec=()
patterns+=(-e '^[[:space:]]*CFLAGS[[:space:]:+]+=[[:space:]:+]-DDEVELHELP')
pathspec+=('**/Makefile')
git -C "${RIOTBASE}" grep "${patterns[@]}" -- "${pathspec[@]}" \
| error_with_message "Use DEVELHELP ?= 1 instead of using CFLAGS directly"
}
error_on_input() {
! grep ''
}
all_checks() {
check_not_parsing_features
check_not_exporting_variables
check_deprecated_vars_patterns
check_board_do_not_include_cpu_features_dep
check_cpu_cpu_model_defined_in_makefile_features
check_not_setting_board_equal
check_board_insufficient_memory_not_in_makefile
checks_tests_application_not_defined_in_makefile
checks_develhelp_not_defined_via_cflags
}
main() {
all_checks | prepend 'Invalid build system patterns found by '"${0}:" | error_on_input >&2
exit $?
}
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main
fi
|
aeneby/RIOT
|
dist/tools/buildsystem_sanity_check/check.sh
|
Shell
|
lgpl-2.1
| 7,826 |
#!/bin/sh
set -e
set -x
rm -rf dbmod
# Debugging + logging
#FLAGS = -DDEBUG -g -D_DEBUG
# Logging
#FLAGS = -DDPRNT -O3
# Profiling
#FLAGS = -DNDEBUG -O3 -pg
# Normal
FLAGS='-DNDEBUG -O3'
COPT="${FLAGS} -DUNIX -Dcdecl= -D__NO_ANONYMOUS_UNIONS__ -Dstrnicmp=strncasecmp -Dstricmp=strcasecmp -Ddp_ANET2"
mkdir dbmod
cd dbmod
gcc ${COPT} "-DPACK=__attribute__ ((packed))" -DSIMNET -I../../../h -I../../dp -I../../../demo/utils ../dbmod.c ../tcapw.c ../crypttab.c ../../../demo/utils/mywcs.c ../../3rdparty/d3des/d3des.c ../../3rdparty/md5/md5c.c ../../../demo/utils/eclock.c ../../dp/assoctab.c ../../dp/dynatab.c -o dbmod
|
BygoneWorlds/anet
|
src/tca/mkdbmod.sh
|
Shell
|
lgpl-2.1
| 624 |
#!/usr/bin/env bash
. ./wvtest-bup.sh || exit $?
. t/lib.sh || exit $?
set -o pipefail
mb=1048576
top="$(WVPASS pwd)" || exit $?
tmpdir="$(WVPASS wvmktempdir)" || exit $?
readonly mb top tmpdir
export BUP_DIR="$tmpdir/bup"
export GIT_DIR="$tmpdir/bup"
bup() { "$top/bup" "$@"; }
WVPASS cd "$tmpdir"
# The 3MB guess is semi-arbitrary, but we've been informed that
# Lustre, for example, uses 1MB, so guess higher than that, at least.
block_size=$(bup-python -c \
"import os; print getattr(os.stat('.'), 'st_blksize', 0) or $mb * 3") \
|| exit $?
data_size=$((block_size * 10))
readonly block_size data_size
WVPASS dd if=/dev/zero of=test-sparse-probe seek="$data_size" bs=1 count=1
probe_size=$(WVPASS du -k -s test-sparse-probe | WVPASS cut -f1) || exit $?
if [ "$probe_size" -ge "$((data_size / 1024))" ]; then
WVSTART "no sparse support detected -- skipping tests"
exit 0
fi
WVSTART "sparse restore on $(current-filesystem), assuming ${block_size}B blocks"
WVPASS bup init
WVPASS mkdir src
WVPASS dd if=/dev/zero of=src/foo seek="$data_size" bs=1 count=1
WVPASS bup index src
WVPASS bup save -n src src
WVSTART "sparse file restore (all sparse)"
WVPASS bup restore -C restore "src/latest/$(pwd)/"
restore_size=$(WVPASS du -k -s restore | WVPASS cut -f1) || exit $?
WVPASS [ "$restore_size" -ge "$((data_size / 1024))" ]
WVPASS "$top/t/compare-trees" -c src/ restore/src/
WVSTART "sparse file restore --no-sparse (all sparse)"
WVPASS rm -r restore
WVPASS bup restore --no-sparse -C restore "src/latest/$(pwd)/"
restore_size=$(WVPASS du -k -s restore | WVPASS cut -f1) || exit $?
WVPASS [ "$restore_size" -ge "$((data_size / 1024))" ]
WVPASS "$top/t/compare-trees" -c src/ restore/src/
WVSTART "sparse file restore --sparse (all sparse)"
WVPASS rm -r restore
WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/"
restore_size=$(WVPASS du -k -s restore | WVPASS cut -f1) || exit $?
WVPASS [ "$restore_size" -le "$((3 * (block_size / 1024)))" ]
WVPASS "$top/t/compare-trees" -c src/ restore/src/
WVSTART "sparse file restore --sparse (sparse end)"
WVPASS echo "start" > src/foo
WVPASS dd if=/dev/zero of=src/foo seek="$data_size" bs=1 count=1 conv=notrunc
WVPASS bup index src
WVPASS bup save -n src src
WVPASS rm -r restore
WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/"
restore_size=$(WVPASS du -k -s restore | WVPASS cut -f1) || exit $?
WVPASS [ "$restore_size" -le "$((3 * (block_size / 1024)))" ]
WVPASS "$top/t/compare-trees" -c src/ restore/src/
WVSTART "sparse file restore --sparse (sparse middle)"
WVPASS echo "end" >> src/foo
WVPASS bup index src
WVPASS bup save -n src src
WVPASS rm -r restore
WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/"
restore_size=$(WVPASS du -k -s restore | WVPASS cut -f1) || exit $?
WVPASS [ "$restore_size" -le "$((5 * (block_size / 1024)))" ]
WVPASS "$top/t/compare-trees" -c src/ restore/src/
WVSTART "sparse file restore --sparse (bracketed zero run in buf)"
WVPASS echo 'x' > src/foo
WVPASS dd if=/dev/zero bs=1 count=512 >> src/foo
WVPASS echo 'y' >> src/foo
WVPASS bup index src
WVPASS bup save -n src src
WVPASS rm -r restore
WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/"
WVPASS "$top/t/compare-trees" -c src/ restore/src/
WVSTART "sparse file restore --sparse (sparse start)"
WVPASS dd if=/dev/zero of=src/foo seek="$data_size" bs=1 count=1
WVPASS echo "end" >> src/foo
WVPASS bup index src
WVPASS bup save -n src src
WVPASS rm -r restore
WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/"
restore_size=$(WVPASS du -k -s restore | WVPASS cut -f1) || exit $?
WVPASS [ "$restore_size" -le "$((5 * (block_size / 1024)))" ]
WVPASS "$top/t/compare-trees" -c src/ restore/src/
WVSTART "sparse file restore --sparse (sparse start and end)"
WVPASS dd if=/dev/zero of=src/foo seek="$data_size" bs=1 count=1
WVPASS echo "middle" >> src/foo
WVPASS dd if=/dev/zero of=src/foo seek=$((2 * data_size)) bs=1 count=1 conv=notrunc
WVPASS bup index src
WVPASS bup save -n src src
WVPASS rm -r restore
WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/"
restore_size=$(WVPASS du -k -s restore | WVPASS cut -f1) || exit $?
WVPASS [ "$restore_size" -le "$((5 * (block_size / 1024)))" ]
WVPASS "$top/t/compare-trees" -c src/ restore/src/
if test "$block_size" -gt $mb; then
random_size="$block_size"
else
random_size=1M
fi
WVSTART "sparse file restore --sparse (random $random_size)"
WVPASS bup random 1M > src/foo
WVPASS bup index src
WVPASS bup save -n src src
WVPASS rm -r restore
WVPASS bup restore --sparse -C restore "src/latest/$(pwd)/"
WVPASS "$top/t/compare-trees" -c src/ restore/src/
WVPASS rm -rf "$tmpdir"
|
tjanez/bup
|
t/test-sparse-files.sh
|
Shell
|
lgpl-2.1
| 4,647 |
#!/bin/sh
#############################################################################
##
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is the build configuration utility of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3.0 as published by the Free Software
## Foundation and appearing in the file LICENSE.GPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU General Public License version 3.0 requirements will be
## met: http://www.gnu.org/copyleft/gpl.html.
##
##
## $QT_END_LICENSE$
##
#############################################################################
# This script generates cryptographic keys of different types.
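# To inspect a generated key afterwards, e.g. the 1024-bit RSA private key:
#   openssl rsa -in rsa-pri-1024.pem -text -noout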
#--- RSA ---------------------------------------------------------------------------
# Note: RSA doesn't require the key size to be divisible by any particular number
for size in 40 511 512 999 1023 1024 2048
do
echo -e "\ngenerating RSA private key to PEM file ..."
openssl genrsa -out rsa-pri-$size.pem $size
echo -e "\ngenerating RSA private key to DER file ..."
openssl rsa -in rsa-pri-$size.pem -out rsa-pri-$size.der -outform DER
echo -e "\ngenerating RSA public key to PEM file ..."
openssl rsa -in rsa-pri-$size.pem -pubout -out rsa-pub-$size.pem
echo -e "\ngenerating RSA public key to DER file ..."
openssl rsa -in rsa-pri-$size.pem -pubout -out rsa-pub-$size.der -outform DER
done
#--- DSA ----------------------------------------------------------------------------
# Note: DSA requires the key size to be in interval [512, 1024] and be divisible by 64
for size in 512 576 960 1024
do
echo -e "\ngenerating DSA parameters to PEM file ..."
openssl dsaparam -out dsapar-$size.pem $size
echo -e "\ngenerating DSA private key to PEM file ..."
openssl gendsa dsapar-$size.pem -out dsa-pri-$size.pem
/bin/rm dsapar-$size.pem
echo -e "\ngenerating DSA private key to DER file ..."
openssl dsa -in dsa-pri-$size.pem -out dsa-pri-$size.der -outform DER
echo -e "\ngenerating DSA public key to PEM file ..."
openssl dsa -in dsa-pri-$size.pem -pubout -out dsa-pub-$size.pem
echo -e "\ngenerating DSA public key to DER file ..."
openssl dsa -in dsa-pri-$size.pem -pubout -out dsa-pub-$size.der -outform DER
done
|
CodeDJ/qt5-hidpi
|
qt/qtbase/tests/auto/network/ssl/qsslkey/keys/genkeys.sh
|
Shell
|
lgpl-2.1
| 3,608 |
rsync -avz --progress /home/pedro/Projetos/LNBIO/SH/Code/src/Java\ Src/SimpleHistogramPanel/src/ ./src/
|
pdroalves/SimpleHistogramPanel
|
sync.sh
|
Shell
|
lgpl-3.0
| 104 |
$HOME/stm32/stlink/st-flash write SPI-dma-lcd.bin 0x08000000
|
llooxy2112/STM32-demo
|
SPI-dma-lcd/flash.sh
|
Shell
|
lgpl-3.0
| 61 |
#!/bin/bash
gem build verum.gemspec
sudo gem install verum-0.3.0.gem
|
paulosalem/verum
|
install_gem_locally.sh
|
Shell
|
lgpl-3.0
| 69 |
#!/bin/bash
set -eu
#
# tools/build-win64-toolchain.sh: Win64 toolchain build script.
#
# n64chain: A (free) open-source N64 development toolchain.
# Copyright 2014-16 Tyler J. Stachecki <[email protected]>
#
# This file is subject to the terms and conditions defined in
# 'LICENSE', which is part of this source code package.
#
BINUTILS="ftp://ftp.gnu.org/gnu/binutils/binutils-2.34.tar.bz2"
GCC="ftp://ftp.gnu.org/gnu/gcc/gcc-10.1.0/gcc-10.1.0.tar.gz"
GMP="ftp://ftp.gnu.org/gnu/gmp/gmp-6.2.0.tar.bz2"
MAKE="ftp://ftp.gnu.org/gnu/make/make-4.2.1.tar.bz2"
MPC="ftp://ftp.gnu.org/gnu/mpc/mpc-1.1.0.tar.gz"
MPFR="ftp://ftp.gnu.org/gnu/mpfr/mpfr-4.0.2.tar.bz2"
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd ${SCRIPT_DIR} && mkdir -p {stamps,tarballs}
export PATH="${PATH}:${SCRIPT_DIR}/bin"
if [ ! -f stamps/binutils-download ]; then
wget "${BINUTILS}" -O "tarballs/$(basename ${BINUTILS})"
touch stamps/binutils-download
fi
if [ ! -f stamps/binutils-extract ]; then
mkdir -p binutils-{build,source}
tar -xf tarballs/$(basename ${BINUTILS}) -C binutils-source --strip 1
touch stamps/binutils-extract
fi
if [ ! -f stamps/binutils-configure ]; then
pushd binutils-build
../binutils-source/configure \
--build=x86_64-linux-gnu \
--host=x86_64-w64-mingw32 \
--prefix="${SCRIPT_DIR}" \
--with-lib-path="${SCRIPT_DIR}/lib" \
--target=mips64-elf --with-arch=vr4300 \
--enable-64-bit-bfd \
--enable-plugins \
--enable-shared \
--disable-gold \
--disable-multilib \
--disable-nls \
--disable-rpath \
--disable-static \
--disable-werror
popd
touch stamps/binutils-configure
fi
if [ ! -f stamps/binutils-build ]; then
pushd binutils-build
make
popd
touch stamps/binutils-build
fi
if [ ! -f stamps/binutils-install ]; then
pushd binutils-build
make install
popd
touch stamps/binutils-install
fi
if [ ! -f stamps/gmp-download ]; then
wget "${GMP}" -O "tarballs/$(basename ${GMP})"
touch stamps/gmp-download
fi
if [ ! -f stamps/mpfr-download ]; then
wget "${MPFR}" -O "tarballs/$(basename ${MPFR})"
touch stamps/mpfr-download
fi
if [ ! -f stamps/mpc-download ]; then
wget "${MPC}" -O "tarballs/$(basename ${MPC})"
touch stamps/mpc-download
fi
if [ ! -f stamps/gcc-download ]; then
wget "${GCC}" -O "tarballs/$(basename ${GCC})"
touch stamps/gcc-download
fi
if [ ! -f stamps/gcc-extract ]; then
mkdir -p gcc-{build,source}
tar -xf tarballs/$(basename ${GCC}) -C gcc-source --strip 1
touch stamps/gcc-extract
fi
if [ ! -f stamps/gmp-extract ]; then
mkdir -p gcc-source/gmp
tar -xf tarballs/$(basename ${GMP}) -C gcc-source/gmp --strip 1
touch stamps/gmp-extract
fi
if [ ! -f stamps/mpfr-extract ]; then
mkdir -p gcc-source/mpfr
tar -xf tarballs/$(basename ${MPFR}) -C gcc-source/mpfr --strip 1
touch stamps/mpfr-extract
fi
if [ ! -f stamps/mpc-extract ]; then
mkdir -p gcc-source/mpc
tar -xf tarballs/$(basename ${MPC}) -C gcc-source/mpc --strip 1
touch stamps/mpc-extract
fi
if [ ! -f stamps/gcc-configure ]; then
pushd gcc-build
../gcc-source/configure \
--build=x86_64-linux-gnu \
--host=x86_64-w64-mingw32 \
--prefix="${SCRIPT_DIR}" \
--target=mips64-elf --with-arch=vr4300 \
--enable-languages=c --without-headers --with-newlib \
--with-gnu-as=${SCRIPT_DIR}/bin/mips64-elf-as.exe \
--with-gnu-ld=${SCRIPT_DIR}/bin/mips64-elf-ld.exe \
--enable-checking=release \
--enable-shared \
--enable-shared-libgcc \
--disable-decimal-float \
--disable-gold \
--disable-libatomic \
--disable-libgomp \
--disable-libitm \
--disable-libquadmath \
--disable-libquadmath-support \
--disable-libsanitizer \
--disable-libssp \
--disable-libunwind-exceptions \
--disable-libvtv \
--disable-multilib \
--disable-nls \
--disable-rpath \
--disable-static \
--disable-symvers \
--disable-threads \
--disable-win32-registry \
--enable-lto \
--enable-plugin \
--without-included-gettext
popd
touch stamps/gcc-configure
fi
if [ ! -f stamps/gcc-build ]; then
pushd gcc-build
make all-gcc
popd
touch stamps/gcc-build
fi
if [ ! -f stamps/gcc-install ]; then
pushd gcc-build
make install-gcc
popd
# While not necessary, this is still a good idea.
pushd "${SCRIPT_DIR}/bin"
cp mips64-elf-{gcc,cc}.exe
popd
touch stamps/gcc-install
fi
if [ ! -f stamps/make-download ]; then
wget "${MAKE}" -O "tarballs/$(basename ${MAKE})"
touch stamps/make-download
fi
if [ ! -f stamps/make-extract ]; then
mkdir -p make-{build,source}
tar -xf tarballs/$(basename ${MAKE}) -C make-source --strip 1
touch stamps/make-extract
fi
if [ ! -f stamps/make-configure ]; then
pushd make-build
../make-source/configure \
--build=x86_64-linux-gnu \
--host=x86_64-w64-mingw32 \
--prefix="${SCRIPT_DIR}" \
--disable-largefile \
--disable-nls \
--disable-rpath
popd
touch stamps/make-configure
fi
if [ ! -f stamps/make-build ]; then
pushd make-build
make
popd
touch stamps/make-build
fi
if [ ! -f stamps/make-install ]; then
pushd make-build
make install
popd
touch stamps/make-install
fi
if [ ! -f stamps/checksum-build ]; then
x86_64-w64-mingw32-gcc -Wall -Wextra -pedantic -std=c99 -O2 checksum.c -o bin/checksum.exe
touch stamps/checksum-build
fi
if [ ! -f stamps/mkfs-build ]; then
x86_64-w64-mingw32-gcc -Wall -Wextra -pedantic -std=c99 -O2 mkfs.c -o bin/mkfs.exe
touch stamps/mkfs-build
fi
if [ ! -f stamps/rspasm-build ]; then
pushd "${SCRIPT_DIR}/../rspasm"
make clean
CC=x86_64-w64-mingw32-gcc RSPASM_LIBS="-lws2_32" make
cp rspasm ${SCRIPT_DIR}/bin/rspasm.exe
popd
touch stamps/rspasm-build
fi
rm -rf "${SCRIPT_DIR}"/../tools/tarballs
rm -rf "${SCRIPT_DIR}"/../tools/*-source
rm -rf "${SCRIPT_DIR}"/../tools/*-build
rm -rf "${SCRIPT_DIR}"/../tools/stamps
exit 0
|
tj90241/n64chain
|
tools/build-win64-toolchain.sh
|
Shell
|
lgpl-3.0
| 5,934 |
#!/bin/bash
while true; do
s=$(nc -p 2300 -l |perl -n -e 'printf "magic=%s from=%s type=%s data=%s\n", unpack("a4 a12 a12 a48");')
date "+%Y-%m-%d %T %Z : $s"
done
|
matthewg42/Mebm
|
mebm_test/test_listen.sh
|
Shell
|
lgpl-3.0
| 168 |
#!/bin/bash
# Clockslow -- A tool to trick app to let it think time goes slower or faster
# Copyright (C) 2014 StarBrilliant <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
if [ "$#" -lt 1 ]
then
echo 'Clockslow: Trick app to let it think time goes slower or faster, inspired by'
echo 'Speed Gear.'
echo "Usage $0 [-v] speed_factor [command]"
echo ''
exit 1
fi
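# Example (illustrative; assumes a factor of 2 means time runs twice as fast):
#   ./clockslow.sh 2 sleep 10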
if [ -e ./libclockslow.so ]
then
if [ -n "$LD_LIBRARY_PATH" ]
then
NEW_LD_LIBRARY_PATH="$(pwd):$LD_LIBRARY_PATH"
else
NEW_LD_LIBRARY_PATH="$(pwd)"
fi
else
NEW_LD_LIBRARY_PATH="$LD_LIBRARY_PATH"
fi
if [ "$1" = "-v" ]
then
export CLOCKSLOW_VERBOSE=1
shift
fi
if [ -n "$1" ]
then
export CLOCKSLOW_FACTOR="$1"
shift
fi
export CLOCKSLOW_START="$(date +%s)"
export LD_LIBRARY_PATH="$NEW_LD_LIBRARY_PATH"
export LD_PRELOAD=libclockslow.so
exec "$@"
echo 'Command not specified, starting bash in Clockslow environment.'
exec bash -i
|
m13253/clockslow
|
clockslow.sh
|
Shell
|
lgpl-3.0
| 1,595 |
#!/bin/bash
svn update
rm -f ~/.gtranslator/umtf/personal-learn-buffer.xml
printf "po/pt_BR.po\ny" | /usr/local/share/gtranslator/scripts/build-gtranslator-learn-buffer.sh
make .bin/pot/pw3270.pot
cp .bin/pot/pw3270.pot po/pt_BR.po
gtranslator --auto-translate=po/pt_BR.po > /dev/null 2>&1
gtranslator po/pt_BR.po > /dev/null 2>&1
TEMPFILE=$(mktemp)
msgfmt -c -v -o $TEMPFILE po/pt_BR.po
if [ "$?" != "0" ]; then
exit 1
fi
cp -f $TEMPFILE /usr/local/share/locale/pt_BR/LC_MESSAGES/pw3270.mo
if [ "$?" != "0" ]; then
exit 1
fi
rm -f $TEMPFILE
|
laubstein/pw3270
|
ptbr.sh
|
Shell
|
lgpl-3.0
| 555 |
#!/bin/sh
set -ue
umask 0027
export LANG='C'
IFS='
'
: ${SRCDIR:=${HOME}/.dotfiles}
: ${DESTDIR:=${HOME}}
: ${SRCTYPE:=${SRCDIR}/.src_type}
# create_symlinks FROM TO
create_symlinks () {
(
srcdir=$1
destdir=$2
find "${srcdir}" -type d | \
sed "s%^${srcdir}/%${destdir}/.%" | \
xargs mkdir -p >/dev/null 2>&1
files=$(find "${srcdir}" -type f)
for file in ${files}; do
dest="${destdir}/.${file#${srcdir}/}"
[ ! -L "${dest}" ] && ln -s -- "${file}" "${dest}"
done
return 0
)
}
# remove_symlinks FROM TO ${REMOVE_ALL_SYMLINKS:-false}
remove_symlinks () {
(
for d in $(find $1 -type d); do
case "$d" in
"$1") path=$2;;
*) path="$2/.${d#$1/}";;
esac
[ ! -d "$path" ] && continue
for l in $(find "$path" -maxdepth 1 -type l); do
realFile=$(readlink "$l")
case "$realFile" in
$1*)
[ "_${3:-}" = '_true' -o ! -e "$l" ] \
&& rm -f "$l"
;;
esac
done
rmdir -p "$d" >/dev/null 2>&1 || true
done
return 0
)
}
check_srctype () {
find "${SRCDIR}" -maxdepth 1 -type d ! -name '.*' | \
fgrep -q "${SRCDIR}/$1"
}
__update () {
(
check_srctype "$t" || return
printf -- '-> create symlinks\n'
create_symlinks "${SRCDIR}/$1" "${DESTDIR}"
if [ -f "${SRCDIR}/$1.sh" ]; then
printf -- '-> module install\n'
. "${SRCDIR}/$1.sh"
module_install
fi
)
}
__purge () {
(
check_srctype "$t" || return
if [ -f "${SRCDIR}/$1.sh" ]; then
printf -- '-> module uninstall\n'
. "${SRCDIR}/$1.sh"
module_uninstall
fi
printf -- '-> remove symlinks\n'
remove_symlinks "${SRCDIR}/$1" "${DESTDIR}" true
)
}
dotfiles_install () {
if [ -f "${SRCTYPE}" ]; then
printf -- '! dotfiles are already installed.\n'
return
fi
if [ "$#" -eq 0 ]; then
printf -- '! no src_type defined.\n'
return
fi
for t in $@; do
printf -- 'type: %s\n' "$t"
__update $t
done
printf '%s\n' "$*" > "${SRCTYPE}"
}
dotfiles_uninstall () {
if [ ! -f "${SRCTYPE}" ]; then
printf -- '! dotfiles are not installed. (or broken)\n'
return
fi
read -r _type < "${SRCTYPE}"
for t in ${_type}; do
printf -- 'type: %s\n' "$t"
__purge "$t"
done
rm -f "${SRCTYPE}"
}
dotfiles_sync () {
[ -f "${SRCTYPE}" ] \
&& read -r _type < "${SRCTYPE}"
[ -z "${_type:-}" ] && return 1
for t in ${_type}; do
printf -- 'type: %s\n' "$t"
check_srctype "$t" || continue
printf -- '-> remove dangling symlinks\n'
remove_symlinks "${SRCDIR}/$t" "${DESTDIR}"
__update $t
done
}
cmd="${1:-}"
[ "$#" -gt 0 ] && shift 1
case "$cmd" in
'install') dotfiles_install "$@";;
'uninstall') dotfiles_uninstall;;
'sync' | '') dotfiles_sync;;
*) printf -- 'Usage: %s [(install [src_type ...]|uninstall|sync)]\n' "$0";;
esac
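# Illustrative invocations (the src_types "common" and "linux" are
# hypothetical and must exist as directories under ${SRCDIR}):
#   ./bootstrap.sh install common linux
#   ./bootstrap.sh sync
#   ./bootstrap.sh uninstall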
|
glabra/dotfiles
|
bootstrap.sh
|
Shell
|
unlicense
| 2,684 |
#!/usr/bin/env bash
set -e
CA_PATH=../ca
CERTS_PATH=server-certs
export CA_DIR=$CA_PATH
export INTERMEDIATE_DIR=$CA_PATH/intermediate
mkdir -p $CERTS_PATH/certs $CERTS_PATH/csr $CERTS_PATH/private
cp openssl/server/openssl*.conf $CERTS_PATH/
cd $CERTS_PATH
# Consul Server
openssl genrsa \
-out private/consul-server.key.pem 8192
chmod 400 private/consul-server.key.pem
openssl req -config openssl-consul-server.conf \
-key private/consul-server.key.pem \
-subj "/C=ES/ST=Spain/L=Madrid/O=Acme S.A./CN=Consul Server" \
-new -sha256 -out csr/consul-server.csr.pem
openssl ca -config $CA_PATH/intermediate/openssl.conf \
-extensions client_server_cert -batch -days 1825 -notext -md sha256 \
-in csr/consul-server.csr.pem \
-out certs/consul-server.cert.pem
chmod 444 certs/consul-server.cert.pem
cat certs/consul-server.cert.pem $CA_PATH/intermediate/certs/intermediate.cert.pem > certs/consul-server-chain.cert.pem
openssl x509 -noout -text \
-in certs/consul-server.cert.pem
openssl verify -CAfile $CA_PATH/intermediate/certs/ca-chain.cert.pem \
certs/consul-server.cert.pem
# Consul Agent
openssl genrsa \
-out private/consul-agent.key.pem 8192
chmod 400 private/consul-agent.key.pem
openssl req -config openssl-consul-agent.conf \
-key private/consul-agent.key.pem \
-subj "/C=ES/ST=Spain/L=Madrid/O=Acme S.A./CN=Consul Agent" \
-new -sha256 -out csr/consul-agent.csr.pem
openssl ca -config $CA_PATH/intermediate/openssl.conf \
-extensions client_server_cert -batch -days 1825 -notext -md sha256 \
-in csr/consul-agent.csr.pem \
-out certs/consul-agent.cert.pem
chmod 444 certs/consul-agent.cert.pem
cat certs/consul-agent.cert.pem $CA_PATH/intermediate/certs/intermediate.cert.pem > certs/consul-agent-chain.cert.pem
openssl x509 -noout -text \
-in certs/consul-agent.cert.pem
openssl verify -CAfile $CA_PATH/intermediate/certs/ca-chain.cert.pem \
certs/consul-agent.cert.pem
# Vault Server
openssl genrsa \
-out private/vault-server.key.pem 8192
chmod 400 private/vault-server.key.pem
openssl req -config openssl-vault-server.conf \
-key private/vault-server.key.pem \
-subj "/C=ES/ST=Spain/L=Madrid/O=Acme S.A./CN=Vault Server" \
-new -sha256 -out csr/vault-server.csr.pem
openssl ca -config $CA_PATH/intermediate/openssl.conf \
-extensions client_server_cert -batch -days 1825 -notext -md sha256 \
-in csr/vault-server.csr.pem \
-out certs/vault-server.cert.pem
chmod 444 certs/vault-server.cert.pem
cat certs/vault-server.cert.pem $CA_PATH/intermediate/certs/intermediate.cert.pem > certs/vault-server-chain.cert.pem
openssl x509 -noout -text \
-in certs/vault-server.cert.pem
openssl verify -CAfile $CA_PATH/intermediate/certs/ca-chain.cert.pem \
certs/vault-server.cert.pem
# Vault Agent
openssl genrsa \
-out private/vault-agent.key.pem 8192
chmod 400 private/vault-agent.key.pem
openssl req -config openssl-vault-agent.conf \
-key private/vault-agent.key.pem \
-subj "/C=ES/ST=Spain/L=Madrid/O=Acme S.A./CN=Vault Agent" \
-new -sha256 -out csr/vault-agent.csr.pem
openssl ca -config $CA_PATH/intermediate/openssl.conf \
-extensions client_server_cert -batch -days 1825 -notext -md sha256 \
-in csr/vault-agent.csr.pem \
-out certs/vault-agent.cert.pem
chmod 444 certs/vault-agent.cert.pem
cat certs/vault-agent.cert.pem $CA_PATH/intermediate/certs/intermediate.cert.pem > certs/vault-agent-chain.cert.pem
openssl x509 -noout -text \
-in certs/vault-agent.cert.pem
openssl verify -CAfile $CA_PATH/intermediate/certs/ca-chain.cert.pem \
certs/vault-agent.cert.pem
# Nomad Server
openssl genrsa \
-out private/nomad-server.key.pem 8192
chmod 400 private/nomad-server.key.pem
openssl req -config openssl-nomad-server.conf \
-key private/nomad-server.key.pem \
-subj "/C=ES/ST=Spain/L=Madrid/O=Acme S.A./CN=Nomad Server" \
-new -sha256 -out csr/nomad-server.csr.pem
openssl ca -config $CA_PATH/intermediate/openssl.conf \
-extensions client_server_cert -batch -days 1825 -notext -md sha256 \
-in csr/nomad-server.csr.pem \
-out certs/nomad-server.cert.pem
chmod 444 certs/nomad-server.cert.pem
cat certs/nomad-server.cert.pem $CA_PATH/intermediate/certs/intermediate.cert.pem > certs/nomad-server-chain.cert.pem
openssl x509 -noout -text \
-in certs/nomad-server.cert.pem
openssl verify -CAfile $CA_PATH/intermediate/certs/ca-chain.cert.pem \
certs/nomad-server.cert.pem
# Nomad Agent
openssl genrsa \
-out private/nomad-agent.key.pem 8192
chmod 400 private/nomad-agent.key.pem
openssl req -config openssl-nomad-agent.conf \
-key private/nomad-agent.key.pem \
-subj "/C=ES/ST=Spain/L=Madrid/O=Acme S.A./CN=Nomad Agent" \
-new -sha256 -out csr/nomad-agent.csr.pem
openssl ca -config $CA_PATH/intermediate/openssl.conf \
-extensions client_server_cert -batch -days 1825 -notext -md sha256 \
-in csr/nomad-agent.csr.pem \
-out certs/nomad-agent.cert.pem
chmod 444 certs/nomad-agent.cert.pem
cat certs/nomad-agent.cert.pem $CA_PATH/intermediate/certs/intermediate.cert.pem > certs/nomad-agent-chain.cert.pem
openssl x509 -noout -text \
-in certs/nomad-agent.cert.pem
openssl verify -CAfile $CA_PATH/intermediate/certs/ca-chain.cert.pem \
certs/nomad-agent.cert.pem
# UI Server
openssl genrsa \
-out private/ui-server.key.pem 8192
chmod 400 private/ui-server.key.pem
openssl req -config openssl-ui-server.conf \
-key private/ui-server.key.pem \
-subj "/C=ES/ST=Spain/L=Madrid/O=Acme S.A./CN=UI Server" \
-new -sha256 -out csr/ui-server.csr.pem
openssl ca -config $CA_PATH/intermediate/openssl.conf \
-extensions server_cert -batch -days 1825 -notext -md sha256 \
-in csr/ui-server.csr.pem \
-out certs/ui-server.cert.pem
chmod 444 certs/ui-server.cert.pem
cat certs/ui-server.cert.pem $CA_PATH/intermediate/certs/intermediate.cert.pem > certs/ui-server-chain.cert.pem
openssl x509 -noout -text \
-in certs/ui-server.cert.pem
openssl verify -CAfile $CA_PATH/intermediate/certs/ca-chain.cert.pem \
certs/ui-server.cert.pem
|
picodotdev/blog-ejemplos
|
ConsulVaultNomadDatacenter/server-certs.sh
|
Shell
|
unlicense
| 6,293 |
#!/bin/bash -x
# Configure the locale to have proper language support.
localedef -i en_US -c -f UTF-8 en_US.UTF-8
dpkg-reconfigure locales
# Configure the timezone.
echo "Europe/Zurich" > "/etc/timezone"
dpkg-reconfigure -f noninteractive tzdata
# Set the machine’s hostname.
echo "DE0-Nano-SoC" > "/etc/hostname"
tee "/etc/hosts" >"/dev/null" <<EOF
127.0.0.1 localhost
127.0.1.1 DE0-Nano-SoC
EOF
# Create the “/etc/network/interfaces” file that describes the network
# interfaces available on the board.
tee "/etc/network/interfaces" > "/dev/null" <<EOF
# interfaces(5) file used by ifup(8) and ifdown(8)
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto eth0
iface eth0 inet dhcp
EOF
# DNS configuration for name resolution. We use Google's public DNS server here.
sudo tee "/etc/resolv.conf" > "/dev/null" <<EOF
nameserver 8.8.8.8
EOF
# Configure Ubuntu Core to display a login shell on the serial console once the
# kernel boots. We had previously configured U-Boot to supply the command-line
# argument "console=ttyS0,115200" to the linux kernel. This argument instructs
# the kernel to use serial console “ttyS0” as the boot shell, so here we choose
# to use the same serial console for the login shell.
tee "/etc/init/ttyS0.conf" > "/dev/null" <<EOF
# ttyS0 - getty
#
# This service maintains a getty on ttyS0
description "Get a getty on ttyS0"
start on runlevel [2345]
stop on runlevel [016]
respawn
exec /sbin/getty -L 115200 ttyS0 vt102
EOF
# Create a user and a password. In this example, we create a user called
# “sahand” with password "1234". Note that we compute an encrypted version of
# the password, because useradd does not allow plain text passwords to be used
# in non-interactive mode.
username="sahand"
password="1234"
encrypted_password="$(perl -e 'printf("%s\n", crypt($ARGV[0], "password"))' "${password}")"
useradd -m -p "${encrypted_password}" -s "/bin/bash" "${username}"
# Ubuntu requires the admin to be part of the "adm" and "sudo" groups, so add
# the previously-created user to the 2 groups.
addgroup ${username} adm
addgroup ${username} sudo
# Set root password to "1234" (same as previously-created user).
echo -e "${password}\n${password}\n" | passwd root
# Remove "/rootfs_config.sh" from /etc/rc.local to avoid reconfiguring system on
# next boot
tee "/etc/rc.local" > "/dev/null" <<EOF
#!/bin/sh -e
#
# rc.local
#
# This script is executed at the end of each multiuser runlevel.
# Make sure that the script will "exit 0" on success or any other
# value on error.
#
# In order to enable or disable this script just change the execution
# bits.
#
# By default this script does nothing.
exit 0
EOF
|
sahandKashani/DE1-SoC
|
DE0_Nano_SoC/DE0_Nano_SoC_demo/sw/hps/linux/rootfs/config_system.sh
|
Shell
|
unlicense
| 2,721 |
typeset -Ag FX FG BG
LSCOLORS="exfxcxdxbxegedabagacad"
LS_COLORS="di=34:ln=35:so=32:pi=33:ex=31:bd=34;46:cd=34;43:su=0;41:sg=0;46:tw=0;42:ow=0;43:"
FX=(
reset "%{[00m%}"
bold "%{[01m%}" no-bold "%{[22m%}"
italic "%{[03m%}" no-italic "%{[23m%}"
underline "%{[04m%}" no-underline "%{[24m%}"
blink "%{[05m%}" no-blink "%{[25m%}"
reverse "%{[07m%}" no-reverse "%{[27m%}"
)
for color in {000..255}; do
FG[$color]="%{[38;5;${color}m%}"
BG[$color]="%{[48;5;${color}m%}"
done
# Show all 256 colors with color number
function spectrum_ls() {
for code in {000..255}; do
print -P -- "$code: %F{$code}Test%f"
done
}
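# Minimal usage sketch for the maps above (color 196 is an arbitrary pick):
#   print -P "${FG[196]}error${FX[reset]} all good"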
|
kloetzl/dotfiles
|
zsh/colors.zsh
|
Shell
|
unlicense
| 693 |
#!/bin/bash
rm -f *.txt *.key *.crt *.csr *.srl
exit 0
|
flipk/pfkutils
|
libprotossl/keys/clean.sh
|
Shell
|
unlicense
| 57 |
#!/usr/bin/env bash
#
# setup.sources.sh: Set up custom apt sources for our TF images.
# Prevent apt install tzdata from asking our location (assumes UTC)
export DEBIAN_FRONTEND=noninteractive
# Set up shared custom sources
apt-get update
apt-get install -y gnupg ca-certificates
# Deadsnakes: https://launchpad.net/~deadsnakes/+archive/ubuntu/ppa
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
# Set up custom sources
cat >/etc/apt/sources.list.d/custom.list <<SOURCES
# Nvidia CUDA packages: 18.04 has more available than 20.04, and we use those
deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64 /
# More Python versions: Deadsnakes
deb http://ppa.launchpad.net/deadsnakes/ppa/ubuntu focal main
deb-src http://ppa.launchpad.net/deadsnakes/ppa/ubuntu focal main
SOURCES
|
tensorflow/build
|
tf_sig_build_dockerfiles/setup.sources.sh
|
Shell
|
apache-2.0
| 982 |
#!/bin/bash
#
# Downloads and runs Google closure linter.
# Guide: https://google.github.io/styleguide/shell.xml
# Link: https://github.com/google/closure-linter
# Source: https://github.com/Datamart/Workspace/blob/master/build/jslint.sh
readonly CWD=$(cd $(dirname $0); pwd)
readonly LIB="${CWD}/lib"
readonly TMP="${CWD}/tmp"
# TODO(user): Replace to project related path.
readonly JS_SOURCES="${CWD}/../src"
readonly JS_LINTER_ZIP="closure-linter.zip"
readonly JS_LINTER_URL="https://github.com/google/closure-linter/archive/v2.3.19.zip"
readonly WGET="$(which wget)"
readonly CURL="$(which curl)"
readonly PYTHON="$(which python)"
readonly CUSTOM_TAGS="version,example,static,namespace,requires,event"
#
# Downloads closure linter.
#
function download() {
if [[ ! -e "$(which gjslint)" ]]; then
echo "Downloading closure linter:"
mkdir -p "${LIB}"
rm -rf "${TMP}" && mkdir "${TMP}" && cd "${TMP}"
if [[ -n "$CURL" ]]; then
$CURL -L "${JS_LINTER_URL}" > "${TMP}/${JS_LINTER_ZIP}"
else
$WGET "${JS_LINTER_URL}" -O "${TMP}/${JS_LINTER_ZIP}"
fi
echo "Done"
echo "Extracting closure linter: "
unzip -q "${TMP}/${JS_LINTER_ZIP}" -d "${LIB}"
echo "Done"
echo "Installing closure linter:"
cd "${LIB}/"closure-linter-*
$PYTHON setup.py --quiet build && sudo $PYTHON setup.py --quiet install
echo "Done"
cd "${CWD}" && rm -rf "${TMP}"
fi
}
#
# Runs closure linter.
#
function run() {
echo "Running closure linter:"
local GJSLINT="$(which gjslint)"
local FIXJSSTYLE="$(which fixjsstyle)"
if [[ -d "${JS_SOURCES}" ]]; then
$FIXJSSTYLE --strict \
--custom_jsdoc_tags "${CUSTOM_TAGS}" \
-x "${CWD}/externs.js" \
-r "${JS_SOURCES}"
# $GJSLINT --strict \
# --custom_jsdoc_tags "${CUSTOM_TAGS}" \
# -x "${CWD}/externs.js" \
# -r "${JS_SOURCES}"
fi
echo "Done"
}
#
# The main function.
#
function main() {
download
run
}
main "$@"
|
Datamart/Komito
|
build/jslint.sh
|
Shell
|
apache-2.0
| 2,018 |
#!/usr/bin/env bash
# Purpose: print current state of git tree (for logging purposes)
# Usage: git_info.sh
set -o errexit -o nounset
if ! GITEXE=$(which git); then
echo "No git executable in path. Exiting."
exit 1
fi
echo " $(git describe --all --long --dirty)"
# All active changes (un-/staged) to tracked files
git --no-pager diff HEAD -p --stat
|
openstack/training-labs
|
labs/osbash/tools/git_info.sh
|
Shell
|
apache-2.0
| 361 |
pt-query-digest --user=root --password=root --review h=192.168.214.131,D=slow_query_log,t=global_query_review --history h=192.168.214.131,D=slow_query_log,t=global_query_review_history --no-report --limit=0% --filter="\$event->{Bytes} = length(\$event->{arg}) and \$event->{hostname}=\"$HOSTNAME\"" slow.log
#--create-review-table When using --review to write results to a table, create the table automatically if it does not exist.
#--create-history-table When using --history to write results to a table, create the table automatically if it does not exist.
#--filter Match and filter the input slow queries against the given expression before analyzing.
#--limit Limit the output to a percentage or count; the default is 20, i.e. output the 20 slowest statements. With 50%, sort by share of total response time in descending order and stop once the cumulative share reaches 50%.
#--host MySQL server address
#--user MySQL username
#--password MySQL user password
#--history Save the analysis results to a table; the results are fairly detailed. On the next run with --history, a statement that already exists but whose query time interval differs from the history table is recorded again, so the history of a query type can be compared by querying the same CHECKSUM.
#--review Save the analysis results to a table; this analysis only parameterizes the query conditions, one record per query type, and is fairly simple. On the next run with --review, statements that were already analyzed are not recorded again.
#--output Output format of the analysis; the value can be report (standard analysis report), slowlog (MySQL slow log), json, or json-anon. report is normally used, for readability.
#--since Time to start the analysis from; the value is a string, either a point in time in "yyyy-mm-dd [hh:mm:ss]" format or a simple duration: s (seconds), h (hours), m (minutes), d (days); e.g. 12h means statistics start from 12 hours ago.
#--until End time; combined with --since, this analyzes slow queries within a given time window.
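# A hedged example (kept commented out; host, credentials and the slow.log
# path are assumptions mirroring the command above): report only the slow
# queries logged in the last 12 hours.
# pt-query-digest --user=root --password=root --since=12h --output=report slow.log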
|
zhujzhuo/anemometerAudit_SQL
|
digest-slow.sh
|
Shell
|
apache-2.0
| 1,960 |
#!/bin/bash
# This script must be run with sudo.
# for libopencv-dev-2.4
add-apt-repository -y ppa:kubuntu-ppa/backports
# for proto2.5
add-apt-repository -y ppa:chris-lea/protobuf
apt-get -y update
# remove CONDA directory
rm -rf $CONDA_DIR
# invoke BVLC caffe scripts
./caffe-public/scripts/travis/travis_install.sh
|
anfeng/CaffeOnSpark
|
scripts/travis/travis_install.sh
|
Shell
|
apache-2.0
| 318 |
#!/bin/sh
export PATH=$GOPATH/bin:$PATH
# /tmp isn't moutned exec on most systems, so we can't actually start
# containers that are created there.
export SRC_DIR=$(pwd)
export LXD_DIR=$(mktemp -d -p $(pwd))
chmod 777 "${LXD_DIR}"
export LXD_CONF=$(mktemp -d)
export LXD_FUIDMAP_DIR=${LXD_DIR}/fuidmap
mkdir -p ${LXD_FUIDMAP_DIR}
BASEURL=https://127.0.0.1:18443
RESULT=failure
set -e
if [ -n "$LXD_DEBUG" ]; then
set -x
fi
if [ "$USER" != "root" ]; then
echo "The testsuite must be run as root."
exit 1
fi
for dep in lxd lxc curl jq git xgettext sqlite3 msgmerge msgfmt; do
type $dep >/dev/null 2>&1 || (echo "Missing dependency: $dep" && exit 1)
done
echo "==> Running the LXD testsuite"
BASEURL=https://127.0.0.1:18443
my_curl() {
curl -k -s --cert "${LXD_CONF}/client.crt" --key "${LXD_CONF}/client.key" $@
}
wait_for() {
op=$($@ | jq -r .operation)
my_curl $BASEURL$op/wait
}
lxc() {
set +x
INJECTED=0
CMD="$(which lxc)"
for arg in $@; do
if [ "$arg" = "--" ]; then
INJECTED=1
CMD="$CMD \"--config\" \"${LXD_CONF}\" $debug"
CMD="$CMD \"--debug\""
CMD="$CMD --"
else
CMD="$CMD \"$arg\""
fi
done
if [ "$INJECTED" = "0" ]; then
CMD="$CMD \"--config\" \"${LXD_CONF}\" $debug"
fi
if [ -n "$LXD_DEBUG" ]; then
set -x
fi
eval "$CMD"
}
wipe() {
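# Remove a tree; on btrfs, nested subvolumes survive rm -Rf, so delete them
# depth-first (find | tac) before removing the directory itself.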
if type btrfs >/dev/null 2>&1; then
rm -Rf "$1" 2>/dev/null || true
if [ -d "$1" ]; then
find "$1" | tac | xargs btrfs subvolume delete >/dev/null 2>&1 || true
fi
fi
rm -Rf "$1"
}
cleanup() {
set +x
if [ -n "$LXD_INSPECT" ]; then
echo "To poke around, use:\n LXD_DIR=$LXD_DIR sudo -E $GOPATH/bin/lxc COMMAND --config ${LXD_CONF}"
read -p "Tests Completed ($RESULT): hit enter to continue" x
fi
echo "==> Cleaning up"
# Try to stop all the containers
my_curl "$BASEURL/1.0/containers" | jq -r .metadata[] 2>/dev/null | while read -r line; do
wait_for my_curl -X PUT "$BASEURL$line/state" -d "{\"action\":\"stop\",\"force\":true}"
done
# Kill the lxd daemons whose process group matches ours (field 5 of /proc/PID/stat)
mygrp=`awk '{ print $5 }' /proc/self/stat`
for p in `pidof lxd`; do
pgrp=`awk '{ print $5 }' /proc/$p/stat`
if [ "$pgrp" = "$mygrp" ]; then
kill -9 $p
fi
done
# Apparently we need to wait a while for everything to die
sleep 3
for dir in ${LXD_CONF} ${LXD_DIR} ${LXD2_DIR} ${LXD3_DIR} ${LXD4_DIR} ${LXD_MIGRATE_DIR} ${LXD_SERVERCONFIG_DIR}; do
[ -n "${dir}" ] && wipe "${dir}"
done
rm -f devlxd-client || true
echo ""
echo ""
echo "==> Test result: $RESULT"
}
trap cleanup EXIT HUP INT TERM
if [ -z "`which lxc`" ]; then
echo "==> Couldn't find lxc" && false
fi
. ./basic.sh
. ./concurrent.sh
. ./exec.sh
. ./database.sh
. ./deps.sh
. ./fuidshift.sh
. ./migration.sh
. ./remote.sh
. ./signoff.sh
. ./snapshots.sh
. ./static_analysis.sh
. ./config.sh
. ./serverconfig.sh
. ./profiling.sh
. ./fdleak.sh
. ./database_update.sh
. ./devlxd.sh
if [ -n "$LXD_DEBUG" ]; then
debug=--debug
fi
spawn_lxd() {
set +x
# LXD_DIR is local here because since `lxc` is actually a function, it
# overwrites the environment and we would lose LXD_DIR's value otherwise.
local LXD_DIR
addr=$1
lxddir=$2
shift
shift
echo "==> Spawning lxd on $addr in $lxddir"
(LXD_DIR=$lxddir lxd $debug --tcp $addr $extraargs $* 2>&1 & echo $! > $lxddir/lxd.pid) | tee $lxddir/lxd.log &
echo "==> Confirming lxd on $addr is responsive"
alive=0
while [ $alive -eq 0 ]; do
[ -e "${lxddir}/unix.socket" ] && LXD_DIR=$lxddir lxc finger && alive=1
sleep 1s
done
echo "==> Setting trust password"
LXD_DIR=$lxddir lxc config set core.trust_password foo
if [ -n "$LXD_DEBUG" ]; then
set -x
fi
}
spawn_lxd 127.0.0.1:18443 $LXD_DIR
export LXD2_DIR=$(mktemp -d -p $(pwd))
chmod 777 "${LXD2_DIR}"
spawn_lxd 127.0.0.1:18444 "${LXD2_DIR}"
# allow for running a specific set of tests
if [ "$#" -gt 0 ]; then
test_$1
RESULT=success
exit
fi
echo "==> TEST: commit sign-off"
test_commits_signed_off
echo "==> TEST: doing static analysis of commits"
static_analysis
echo "==> TEST: checking dependencies"
test_check_deps
echo "==> TEST: Database schema update"
test_database_update
echo "==> TEST: lxc remote url"
test_remote_url
echo "==> TEST: lxc remote administration"
test_remote_admin
echo "==> TEST: basic usage"
test_basic_usage
echo "==> TEST: concurrent exec"
test_concurrent_exec
echo "==> TEST: concurrent startup"
test_concurrent
echo "==> TEST: lxc remote usage"
test_remote_usage
echo "==> TEST: snapshots"
test_snapshots
echo "==> TEST: snapshot restore"
test_snap_restore
echo "==> TEST: profiles, devices and configuration"
test_config_profiles
echo "==> TEST: server config"
test_server_config
echo "==> TEST: devlxd"
test_devlxd
if type fuidshift >/dev/null 2>&1; then
echo "==> TEST: uidshift"
test_fuidshift
else
echo "==> SKIP: fuidshift (binary missing)"
fi
echo "==> TEST: migration"
test_migration
curversion=`dpkg -s lxc | awk '/^Version/ { print $2 }'`
if dpkg --compare-versions "$curversion" gt 1.1.2-0ubuntu3; then
echo "==> TEST: fdleak"
test_fdleak
else
# We temporarily skip the fdleak test because a bug in lxc is
# known to make it fail without lxc commit
# 858377e: logs: introduce a thread-local 'current' lxc_config (v2)
echo "==> SKIPPING TEST: fdleak"
fi
echo "==> TEST: cpu profiling"
test_cpu_profiling
echo "==> TEST: memory profiling"
test_mem_profiling
# This should always be run last
echo "==> TEST: database lock"
test_database_lock
RESULT=success
|
basvanbeek/lxd
|
test/main.sh
|
Shell
|
apache-2.0
| 5,770 |
# Helper functions for lstore-release
#
# Globals
#
LSTORE_SCRIPT_BASE=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
LSTORE_RELEASE_BASE=$(cd $(dirname "${LSTORE_SCRIPT_BASE}") && pwd)
LSTORE_TARBALL_ROOT=$LSTORE_RELEASE_BASE/tarballs/
LSTORE_LOCAL_REPOS="toolbox ibp gop lio meta release"
#
# Informational messages
#
function lstore_message() {
MESSAGE_TYPE=$1
shift
echo "$@" | >&2 sed -e "s,^,$MESSAGE_TYPE: ,g"
}
function fatal() {
lstore_message FATAL "$@"
exit 1
}
function note() {
lstore_message NOTE "$@"
}
#
# Additional helpers
#
function get_repo_master() {
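# LSTORE_HEAD_BRANCHES is assumed to hold space-separated "repo=branch"
# entries; print the branch recorded for repo $1.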
for VAR in $LSTORE_HEAD_BRANCHES; do
if [ "${VAR%=*}" == "$1" ]; then
echo "${VAR##*=}"
fi
done
}
function get_repo_source_path() {
if [[ "${LSTORE_LOCAL_REPOS}" =~ "$1" ]]; then
echo "$LSTORE_RELEASE_BASE/src/$1"
else
echo "$LSTORE_RELEASE_BASE/vendor/$1"
fi
}
function build_lstore_binary() {
# In-tree builds (are for chumps)
# Make out-of-tree builds by default and then let someone force the
# build to the source tree if they're a masochist.
build_lstore_binary_outof_tree $1 $(pwd) $2
}
function build_lstore_binary_outof_tree() {
set -e
TO_BUILD=$1
SOURCE_PATH=$2
INSTALL_PREFIX=${3:-${LSTORE_RELEASE_BASE}/build/local}
BUILD_STATIC=${4:-0}
case $TO_BUILD in
apr-accre)
# Keep this in sync with CPackConfig.cmake in our fork
( set -eu
cd ${SOURCE_PATH}
./buildconf
)
${SOURCE_PATH}/configure \
--prefix=${INSTALL_PREFIX} \
--includedir=${INSTALL_PREFIX}/include/apr-ACCRE-1 \
--with-installbuilddir=${INSTALL_PREFIX}/lib/apr-ACCRE-1/build
make
make test
make install
;;
apr-util-accre)
if [ -e ${INSTALL_PREFIX}/bin/apr-ACCRE-1-config ]; then
OTHER_ARGS="--with-apr=${INSTALL_PREFIX}/bin/apr-ACCRE-1-config"
fi
# Keep this in sync with CPackConfig.cmake in our fork
${SOURCE_PATH}/configure --prefix=${INSTALL_PREFIX} $OTHER_ARGS \
--includedir=${INSTALL_PREFIX}/include/apr-util-ACCRE-1 \
--with-installbuilddir=${INSTALL_PREFIX}/lib/apr-util-ACCRE-1/build
make
make test
make install
;;
jerasure|toolbox|gop|ibp|lio|czmq|gridftp)
EXTRA_ARGS=""
MAKE_COMMAND="make install"
if [ $BUILD_STATIC -ne 0 ]; then
EXTRA_ARGS="-DCMAKE_C_COMPILER=/usr/local//Cellar/llvm36/3.6.2/share/clang-3.6/tools/scan-build/ccc-analyzer"
MAKE_COMMAND="/usr/local//Cellar/llvm/3.6.2/bin/scan-build -o $(pwd) make"
fi
cmake ${SOURCE_PATH} ${EXTRA_ARGS} \
-DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} \
-DCMAKE_INSTALL_PREFIX=${INSTALL_PREFIX}
$MAKE_COMMAND
;;
release)
:
;;
*)
fatal "Invalid package: $TO_BUILD"
;;
esac
}
function build_lstore_package() {
set -e
TO_BUILD=$1
SOURCE_PATH=${2:-$(get_repo_source_path ${TO_BUILD})}
TAG_NAME=${3:-test}
DISTRO_NAME=${4:-undefined}
case $DISTRO_NAME in
undefined)
CPACK_ARG=""
CMAKE_ARG=""
NATIVE_PKG=""
PKG_TYPE=""
;;
ubuntu-*|debian-*)
CPACK_ARG="-G DEB"
CMAKE_ARG="-DCPACK_GENERATOR=DEB:TGZ:TBZ2 -DCPACK_SOURCE_GENERATOR=DEB"
NATIVE_PKG="cp -ra $SOURCE_PATH ./ ; pushd $PACKAGE ; dpkg-buildpackage -uc -us ; popd"
PKG_TYPE="deb"
;;
centos-*)
CPACK_ARG="-G RPM"
CMAKE_ARG="-DCPACK_GENERATOR=RPM:TGZ:TBZ2 -DCPACK_SOURCE_GENERATOR=RPM"
NATIVE_PKG=""
PKG_TYPE="rpm"
;;
*)
fatal "Unexpected distro name $DISTRO_NAME"
;;
esac
if [ "$TO_BUILD" == "apr-accre" ]; then
( set -eu
cd ${SOURCE_PATH}
./buildconf
)
fi
case $TO_BUILD in
apr-accre|apr-util-accre)
ls -l $SOURCE_PATH/CPackConfig.cmake
cpack $CPACK_ARG --config $SOURCE_PATH/CPackConfig.cmake \
--debug --verbose "-DCPACK_VERSION=$TAG_NAME" || \
fatal "$(cat _CPack_Packages/*/InstallOutput.log)"
;;
czmq)
eval $NATIVE_PKG
;;
jerasure|lio|ibp|gop|toolbox|gridftp|meta)
# This is gross, but works for now..
set -x
cmake -DWANT_PACKAGE:BOOL=ON "-DLSTORE_PROJECT_VERSION=$TAG_NAME"\
$(echo "$CMAKE_ARG" | tr ':' ';') --debug --verbose $SOURCE_PATH
set +x
make package
;;
release)
case $PKG_TYPE in
rpm)
cmake $(echo "$CMAKE_ARG" | tr ':' ';') --debug --verbose \
-DCMAKE_INSTALL_PREFIX="/" $SOURCE_PATH/rpm-release
make package
;;
deb)
cmake $(echo "$CMAKE_ARG" | tr ':' ';') --debug --verbose \
-DCMAKE_INSTALL_PREFIX="/" $SOURCE_PATH/deb-release
make package
;;
*)
:
;;
esac
;;
*)
fatal "Invalid package: $TO_BUILD"
;;
esac
}
function get_cmake_tarballs(){
if [ ! -d ${LSTORE_TARBALL_ROOT} ]; then
mkdir ${LSTORE_TARBALL_ROOT}
fi
curl https://cmake.org/files/v3.3/cmake-3.3.2-Linux-x86_64.tar.gz > \
${LSTORE_TARBALL_ROOT}/cmake-3.3.2-Linux-x86_64.tar.gz
}
function build_helper() {
# Don't copy/paste code twice for build-local and build-external
set -e
BUILD_BASE="$LSTORE_RELEASE_BASE/build"
SOURCE_BASE="$LSTORE_RELEASE_BASE/src"
VENDOR_BASE="$LSTORE_RELEASE_BASE/vendor"
mkdir -p $LSTORE_RELEASE_BASE/build/logs
PREFIX=$LSTORE_RELEASE_BASE/build/local
check_cmake
if [ $1 == "STATIC" ]; then
STATIC=1
PREFIX="${PREFIX}-static"
shift
else
STATIC=0
fi
pushd $BUILD_BASE
for p in $@; do
TARGET="${p}"
if [ $STATIC -ne 0 ]; then
TARGET="${p}-static"
fi
BUILT_FLAG="${PREFIX}/built-$TARGET"
if [ -e $BUILT_FLAG ]; then
note "Not building $TARGET, was already built. To change this behavior,"
note " remove $BUILT_FLAG"
continue
fi
[ ! -d $TARGET ] && mkdir -p $TARGET
pushd $TARGET
SOURCE_PATH=$(get_repo_source_path ${p})
build_lstore_binary_outof_tree ${p} ${SOURCE_PATH} ${PREFIX} ${STATIC} 2>&1 | tee $LSTORE_RELEASE_BASE/build/logs/${TARGET}-build.log
[ ${PIPESTATUS[0]} -eq 0 ] || fatal "Could not build ${TARGET}"
touch $BUILT_FLAG
popd
done
popd
}
function load_github_token() {
if [ ! -z "${LSTORE_GITHUB_TOKEN+}" ]; then
return
elif [ -e $HOME/.lstore_release ]; then
source $HOME/.lstore_release
fi
set +u
[ -z "${LSTORE_GITHUB_TOKEN}" ] && \
fatal "Need a github authentication token to perform this action. To get
a token, use the following FAQ (be sure to remove all scopes).
https://help.github.com/articles/creating-an-access-token-for-command-line-use/
This token should be set to \$LSTORE_GITHUB_TOKEN. Alternately, the file
\$HOME/.lstore_release can be used to store secrets. The following will set
your github token only when needed:
export LSTORE_GITHUB_TOKEN=<token from github page>"
set -u
return 0
}
function create_release_candidate() {
# Make a release candidate in the current directory
PROPOSED_TAG=$1
PREVIOUS_TAG=$2
PROPOSED_BRANCH=$3
PROPOSED_URL=https://github.com/accre/lstore-${REPO}/tree/ACCRE_${PROPOSED_TAG}
PROPOSED_DIFF=https://github.com/accre/lstore-${REPO}/compare/${PREVIOUS_TAG}...ACCRE_${PROPOSED_TAG}
# Sanity check things look okay.
RET="$(get_repo_status $(pwd))"
GIT=${RET% *}
CLEAN=${RET##* }
if [ $CLEAN != "CLEAN" ]; then
fatal "Package $REPO isn't clean."
fi
git show-ref "ACCRE_${PROPOSED_TAG}" &>/dev/null && \
fatal "The release ${PROPOSED_TAG} already exists"
if [ ! -z "$(git branch --list $PROPOSED_BRANCH)" ]; then
git checkout $PROPOSED_BRANCH
else
fatal "Could not find release branch $PROPOSED_BRANCH"
fi
NEW_CHANGELOG=$(update_changelog ${PROPOSED_TAG} ${PREVIOUS_TAG})
git commit CHANGELOG.md -m "Release ${PROPOSED_TAG}
$NEW_CHANGELOG"
git tag -a "ACCRE_${PROPOSED_TAG}" -m "Release ${PROPOSED_TAG}
$NEW_CHANGELOG"
}
function update_changelog() {
# Modify the changelog in the current directory
PROPOSED_TAG=$1
PREVIOUS_TAG=$2
CURRENT_TAG=${3:-$(git rev-parse HEAD)}
# TODO: config this
UPSTREAM_REMOTE=origin
ORIGIN_REMOTE=origin
TARGET_BRANCH=accre-release
REPO=$(basename $(pwd))
PROPOSED_URL=https://github.com/accre/lstore-${REPO}/tree/ACCRE_${PROPOSED_TAG}
PROPOSED_DIFF=https://github.com/accre/lstore-${REPO}/compare/${PREVIOUS_TAG}...ACCRE_${PROPOSED_TAG}
# Update CHANGELOG.md
echo -n "# **[$PROPOSED_TAG]($PROPOSED_URL)** $(date '+(%F)')
## Changes ([full changelog]($PROPOSED_DIFF))
$(git log --oneline --no-merges ${PREVIOUS_TAG}..${CURRENT_TAG} | \
sed 's/^/* /')
" > CHANGELOG.md
cat CHANGELOG.md
git show HEAD:CHANGELOG.md >> CHANGELOG.md
}
function check_cmake(){
# Obnoxiously, we need cmake 2.8.12 to build RPM, and even Centos7 only
# packages 2.8.11
set +e
# TODO: Detect architechture
CMAKE_LOCAL_TARBALL=${LSTORE_TARBALL_ROOT}/cmake-3.3.2-Linux-x86_64.tar.gz
CMAKE_VERSION=$(cmake --version 2>/dev/null | head -n 1 | awk '{ print $3 }')
[ -z "$CMAKE_VERSION" ] && CMAKE_VERSION="0.0.0"
set -e
INSTALL_PATH=${1:-${LSTORE_RELEASE_BASE}/build}
IFS="." read -a VERSION_ARRAY <<< "${CMAKE_VERSION}"
if [ "${VERSION_ARRAY[0]}" -gt 2 ]; then
# We're good if we're at cmake 3
return
fi
if [[ "${VERSION_ARRAY[1]}" -lt 8 || "${VERSION_ARRAY[2]}" -lt 12 ]]; then
if [ "$CMAKE_VERSION" == "0.0.0" ]; then
note "Couldn't find cmake, pulling our own"
else
note "Using bundled version of cmake - the system version is too old '$CMAKE_VERSION'"
fi
# Download cmake
if [ ! -d $INSTALL_PATH/cmake ]; then
if [ ! -e $CMAKE_LOCAL_TARBALL ]; then
get_cmake_tarballs
fi
pushd $INSTALL_PATH
tar xzf $CMAKE_LOCAL_TARBALL
mv cmake-3.3.2-Linux-x86_64 cmake
popd
fi
export PATH="$INSTALL_PATH/cmake/bin:$PATH"
fi
hash -r
CMAKE_VERSION=$(cmake --version | head -n 1 | awk '{ print $3 }')
note "Bundled version of cmake is version '$CMAKE_VERSION'"
note "Bundled cmake can be found at $(which cmake)"
}
|
PerilousApricot/lstore
|
scripts/functions.sh
|
Shell
|
apache-2.0
| 11,314 |