code (stringlengths: 2-1.05M) | repo_name (stringlengths: 5-110) | path (stringlengths: 3-922) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64: 2-1.05M)
---|---|---|---|---|---
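# For each source/sink mixture below: point make-meta.py at the source and
# sink FASTA files, drop the resulting metadata into the QIIME output
# directory, convert the BIOM OTU table to classic TSV, and run the R analysis.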
source1='*10000.0*renamed.fa'
source2='*7000.3000*renamed.fa'
sink='*9900.100*renamed.fa'
combo='source.0.source.30.sink.01.combined.fa'
python make-meta.py $combo $source1 $source2 $sink
mv $combo.meta $combo-qiime/metadata.txt
cd $combo-qiime
biom convert -i otu_table.biom -o otus.txt -b
R --vanilla --slave < example.r
cd /mnt/data1/lakelanier/16S-1
source1='*10000.0*renamed.fa'
source2='*7000.3000*renamed.fa'
sink='*9990.10*renamed.fa'
combo='source.0.source.30.sink.001.combined.fa'
python make-meta.py $combo $source1 $source2 $sink
mv $combo.meta $combo-qiime/metadata.txt
cd $combo-qiime
biom convert -i otu_table.biom -o otus.txt -b
R --vanilla --slave < example.r
cd /mnt/data1/lakelanier/16S-1
|
germs-lab/non-point-source-tracking
|
automation/single-spike/30-2-st.sh
|
Shell
|
mit
| 711 |
_homebrew-installed() {
type brew &> /dev/null
}
FOUND_RBENV=0
rbenvdirs=("$HOME/.rbenv" "/usr/local/rbenv" "/opt/rbenv" "/usr/local/opt/rbenv")
if _homebrew-installed && rbenv_homebrew_path=$(brew --prefix rbenv 2>/dev/null); then
rbenvdirs=($rbenv_homebrew_path "${rbenvdirs[@]}")
unset rbenv_homebrew_path
fi
for rbenvdir in "${rbenvdirs[@]}" ; do
if [ -d "$rbenvdir/bin" -a $FOUND_RBENV -eq 0 ] ; then
FOUND_RBENV=1
if [[ $RBENV_ROOT = '' ]]; then
RBENV_ROOT=$rbenvdir
fi
export RBENV_ROOT
export PATH=${rbenvdir}/bin:$PATH
eval "$(rbenv init --no-rehash - zsh)"
alias rubies="rbenv versions"
alias gemsets="rbenv gemset list"
function current_ruby() {
echo "$(rbenv version-name)"
}
function current_gemset() {
echo "$(rbenv gemset active 2&>/dev/null | head -n1)"
}
function gems {
local rbenv_path=$(rbenv prefix)
gem list $@ | sed -E \
-e "s/\([0-9a-z, \.]+( .+)?\)/$fg[blue]&$reset_color/g" \
-e "s|$(echo $rbenv_path)|$fg[magenta]\$rbenv_path$reset_color|g" \
-e "s/$current_ruby@global/$fg[yellow]&$reset_color/g" \
-e "s/$current_ruby$current_gemset$/$fg[green]&$reset_color/g"
}
function rbenv_prompt_info() {
# if [[ -n $(current_gemset) ]] ; then
# echo "$(current_ruby)@$(current_gemset)"
# else
echo "$(current_ruby)"
# fi
}
fi
done
unset rbenvdir
if [ $FOUND_RBENV -eq 0 ] ; then
alias rubies='ruby -v'
function gemsets() { echo 'not supported' }
function rbenv_prompt_info() { echo "system: $(ruby -v | cut -f-2 -d ' ')" }
fi
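# Example usage once the plugin is loaded (assumes rbenv, and rbenv-gemset
# for the gemset helpers, are installed):
#   rubies              # list installed rubies
#   gemsets             # list gemsets
#   gems                # colorized `gem list`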
|
larrylv/oh-my-zsh
|
plugins/rbenv/rbenv.plugin.zsh
|
Shell
|
mit
| 1,631 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3269-1
#
# Security announcement date: 2015-05-22 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:24 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: armv7l
#
# Vulnerable packages fixed in version:
# - postgresql-9.1:9.1.16-0+deb7u1
#
# Latest versions recommended by the security team:
# - postgresql-9.1:9.1.24-0+deb7u1
#
# CVE List:
# - CVE-2015-3165
# - CVE-2015-3166
# - CVE-2015-3167
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade postgresql-9.1=9.1.24-0+deb7u1 -y
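# Verify the installed version afterwards (illustrative):
#   dpkg -s postgresql-9.1 | grep '^Version'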
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/armv7l/2015/DSA-3269-1.sh
|
Shell
|
mit
| 678 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2292-1
#
# Security announcement date: 2011-08-11 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:17 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - isc-dhcp:4.1.1-P1-15+squeeze3
#
# Latest versions recommended by the security team:
# - isc-dhcp:4.1.1-P1-15+squeeze10
#
# CVE List:
# - CVE-2011-2748
# - CVE-2011-2749
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade isc-dhcp=4.1.1-P1-15+squeeze10 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2011/DSA-2292-1.sh
|
Shell
|
mit
| 658 |
#!/bin/sh
USAGE="usage: gitflow gh-pages [-c | --commit]\n\n"
remoteUrl=
has_remote=0
first_commit=0
branch_exists=0
while test $# != 0
do
case "$1" in
-c | --commit)
first_commit=1
;;
-h | --help)
printf "${USAGE}"
exit 0
;;
*)
printf "Unknown option.\n"
printf "${USAGE}"
exit 1
;;
esac
shift
done
# Gh-Pages Branch
# ===============
# Check if Gh-Pages branch exists.
if git show-ref --verify -q refs/heads/gh-pages; then
printf "Error: gh-pages branch already exists.\n"
else
# Check if the remote repository exists.
remoteUrl=$(git ls-remote --get-url)
if git ls-remote --exit-code "$remoteUrl" HEAD > /dev/null 2>&1; then
has_remote=1
fi
# Create Gh-Pages branch.
git checkout master &&
git checkout --orphan gh-pages &&
git rm -rf .
if [ "$first_commit" -eq 1 ]; then
echo "Docs coming soon." > index.html &&
git add index.html &&
git commit -a -m "initial gh-pages commit"
fi
if [ "$has_remote" -eq 1 ]; then
git push
fi
fi
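# Example (illustrative): create the branch with an initial placeholder commit
#   gitflow gh-pages --commit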
|
ajthor/gitflow
|
bin/gh-pages.sh
|
Shell
|
mit
| 1,002 |
#!/usr/bin/env bash
echo "Generating sample files with phtools-related tags"
# ********** CITY ************
# cfile="tag_city0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-MWG:City=MWG-Петербург" "$cfile"
#
# cfile="tag_city1.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:City=IPTC-Петербург" "-XMP:City=XMP-Москва" "-XMP:LocationShownCity=XMP-ShownCity" "$cfile"
#
# cfile="tag_city2.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:City=" "-XMP:City=XMP-Москва" "-XMP:LocationShownCity=XMP-ShownCity" "$cfile"
#
# cfile="tag_city3.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:City=" "-XMP:City=" "-XMP:LocationShownCity=XMP-ShownCity" "$cfile"
# ********** Collections ************
# XMP-mwg-coll:Collections (struct+)
# CollectionName
# CollectionURI
# cfile="tag_collections0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-XMP-mwg-coll:Collections+={CollectionName=mwg_collname1, CollectionURI=mwg_colluri1}" "-XMP-mwg-coll:Collections+={CollectionName=mwg_collname2, CollectionURI=mwg_colluri2}" "$cfile"
#
# ********** CREATOR ************
# EXIF:Artist, IPTC:By-line, XMP-dc:Creator
# cfile="tag_creator0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-MWG:Creator-=mwg1" "-MWG:Creator+=mwg1" "-MWG:Creator-=mwg2" "-MWG:Creator+=mwg2" "$cfile"
#
# cfile="tag_creator1.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:Artist=exif_artist1; exif_artist2; exif_artist3" "-IPTC:By-line+=iptc_by-line1" "-IPTC:By-line+=iptc_by-line2" "-IPTC:By-line+=iptc_by-line3" "-XMP:Creator+=xmp-creator1" "-XMP:Creator+=xmp-creator2" "-XMP:Creator+=xmp-creator3" "$cfile"
#
# cfile="tag_creator2.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:Artist=" "-IPTC:By-line+=iptc_by-line1" "-IPTC:By-line+=iptc_by-line2" "-IPTC:By-line+=iptc_by-line3" "-XMP:Creator+=xmp-creator1" "-XMP:Creator+=xmp-creator2" "-XMP:Creator+=xmp-creator3" "$cfile"
#
# cfile="tag_creator3.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:Artist=" "-IPTC:By-line=" "-XMP:Creator+=xmp-creator1" "-XMP:Creator+=xmp-creator2" "-XMP:Creator+=xmp-creator3" "$cfile"
# ********** COPYRIGHT ************
# EXIF:Copyright, IPTC:CopyrightNotice, XMP-dc:Rights
# cfile="tag_copyright0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-MWG:Copyright=mwg_AndrewBiz" "$cfile"
#
# cfile="tag_copyright1.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:Copyright=exif_copyright" "-IPTC:CopyrightNotice=iptc_copyrightnotice" "-XMP:Rights=xmp-rights" "$cfile"
#
# cfile="tag_copyright2.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:Copyright=" "-IPTC:CopyrightNotice=iptc_copyrightnotice" "-XMP:Rights=xmp-rights" "$cfile"
#
# cfile="tag_copyright3.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:Copyright=" "-IPTC:CopyrightNotice=" "-XMP:Rights=xmp-rights" "$cfile"
# ********** COUNTRY ************
# IPTC:Country-PrimaryLocationName, XMP-photoshop:Country, XMP-iptcExt:LocationShownCountryName
# cfile="tag_country0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-MWG:Country=mwg_country" "$cfile"
#
# cfile="tag_country1.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Country-PrimaryLocationName=iptc_country-primarylocationname" "-XMP:Country=xmp_country" "-XMP:LocationShownCountryName=xmp_locationshowncountryname" "$cfile"
#
# cfile="tag_country2.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Country-PrimaryLocationName=" "-XMP:Country=xmp_country" "-XMP:LocationShownCountryName=xmp_locationshowncountryname" "$cfile"
#
# cfile="tag_country3.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Country-PrimaryLocationName=" "-XMP:Country=" "-XMP:LocationShownCountryName=xmp_locationshowncountryname" "$cfile"
# ********** Keywords ************
# IPTC:Keywords, XMP:Subject
# cfile="tag_keywords0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-MWG:Keywords+=mwg_keyword1" "-MWG:Keywords+=mwg_keyword2" "$cfile"
#
# cfile="tag_keywords1.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Keywords+=iptc_kw1" "-IPTC:Keywords+=iptc_kw2" "-XMP:Subject+=xmp_subj1" "-XMP:Subject+=xmp_subj2" "$cfile"
#
# cfile="tag_keywords2.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Keywords=" "-XMP:Subject+=xmp_subj1" "-XMP:Subject+=xmp_subj2" "$cfile"
# ********** LOCATION ************
# IPTC:Sub-location, XMP:Location, XMP:LocationShownSublocation
# cfile="tag_location0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-MWG:Location=mwg_location" "$cfile"
#
# cfile="tag_location1.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Sub-location=iptc_sublocation" "-XMP:Location=xmp_location" "-XMP:LocationShownSublocation=xmp_locationshownsublocation" "$cfile"
#
# cfile="tag_location2.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Sub-location=" "-XMP:Location=xmp_location" "-XMP:LocationShownSublocation=xmp_locationshownsublocation" "$cfile"
#
# cfile="tag_location3.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Sub-location=" "-XMP:Location=" "-XMP:LocationShownSublocation=xmp_locationshownsublocation" "$cfile"
# ********** STATE ************
# IPTC:Province-State, XMP-photoshop:State, XMP-iptcExt:LocationShownProvinceState
# cfile="tag_state0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-MWG:State=mwg_state" "$cfile"
#
# cfile="tag_state1.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Province-State=iptc_provincestate" "-XMP:State=xmp_state" "-XMP:LocationShownProvinceState=xmp_locationshownprovincestate" "$cfile"
#
# cfile="tag_state2.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Province-State=" "-XMP:State=xmp_state" "-XMP:LocationShownProvinceState=xmp_locationshownprovincestate" "$cfile"
#
# cfile="tag_state3.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-IPTC:Province-State=" "-XMP:State=" "-XMP:LocationShownProvinceState=xmp_locationshownprovincestate" "$cfile"
# ********** DateTimeOriginal ************
# -MWG:DateTimeOriginal: EXIF:DateTimeOriginal, IPTC:DateCreated+IPTC:TimeCreated, XMP-photoshop:DateCreated
# cfile="tag_dto0.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-MWG:DateTimeOriginal=2000:01:01 00:00:00" "$cfile"
#
# cfile="tag_dto1.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:DateTimeOriginal=2001:01:01 01:01:01" "-IPTC:DateCreated=2002:02:02" "-IPTC:TimeCreated=02:02:02+02:00" "-XMP:DateCreated=2003:03:03 03:03:03" "$cfile"
#
# cfile="tag_dto2.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:DateTimeOriginal=" "-IPTC:DateCreated=2002:02:02" "-IPTC:TimeCreated=02:02:02+02:00" "-XMP:DateCreated=2003:03:03 03:03:03" "$cfile"
#
# cfile="tag_dto3.JPG"
# echo "Preparing file $cfile ..."
# cp "_template.JPG" "$cfile"
# exiftool -P -overwrite_original "-EXIF:DateTimeOriginal=" "-IPTC:DateCreated=" "-IPTC:TimeCreated=" "-XMP:DateCreated=2003:03:03 03:03:03" "$cfile"
# ********** CreateDate ************
# -MWG:CreateDate: EXIF:CreateDate (EXIF:SubSecTimeDigitized), IPTC:DigitalCreationDate+IPTC:DigitalCreationTime, XMP-xmp:CreateDate
cfile="tag_cd0.JPG"
echo "Preparing file $cfile ..."
cp "_template.JPG" "$cfile"
exiftool -P -overwrite_original "-MWG:CreateDate=2010:10:10 10:10:10" "$cfile"
cfile="tag_cd1.JPG"
echo "Preparing file $cfile ..."
cp "_template.JPG" "$cfile"
exiftool -P -overwrite_original "-EXIF:CreateDate=2004:04:04 04:04:04" "-IPTC:DigitalCreationDate=2005:05:05" "-IPTC:DigitalCreationTime=05:05:05+05:00" "-XMP:CreateDate=2006:06:06 06:06:06" "$cfile"
cfile="tag_cd2.JPG"
echo "Preparing file $cfile ..."
cp "_template.JPG" "$cfile"
exiftool -P -overwrite_original "-EXIF:CreateDate=" "-IPTC:DigitalCreationDate=2005:05:05" "-IPTC:DigitalCreationTime=05:05:05+05:00" "-XMP:CreateDate=2006:06:06 06:06:06" "$cfile"
cfile="tag_cd3.JPG"
echo "Preparing file $cfile ..."
cp "_template.JPG" "$cfile"
exiftool -P -overwrite_original "-EXIF:CreateDate=" "-IPTC:DigitalCreationDate=" "-IPTC:DigitalCreationTime=" "-XMP:CreateDate=2006:06:06 06:06:06" "$cfile"
|
AndrewBiz/phtools
|
features/media/tags/gen_all_tags.sh
|
Shell
|
mit
| 9,637 |
# Regenerates Lyft project from current directory
function re() {
export INCLUDE_NON_APP_TRANSITIVE=2
export NO_SWIFTLINT=1
make -C $(git rev-parse --show-toplevel) update_dependencies targets=Lyft
}
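# Usage: run `re` from anywhere inside the repo checkout; git rev-parse
# locates the top-level directory for make.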
|
sberrevoets/dotfiles
|
bash/lyft.bash
|
Shell
|
mit
| 212 |
#!/usr/bin/env bash
#------------------------------------------------------------------------------
# This script builds the solidity binary using Emscripten.
# Emscripten is a way to compile C/C++ to JavaScript.
#
# http://kripken.github.io/emscripten-site/
#
# First run install_dep.sh OUTSIDE of docker and then
# run this script inside a docker image trzeci/emscripten
#
# The documentation for solidity is hosted at:
#
# http://solidity.readthedocs.io/
#
# ------------------------------------------------------------------------------
# This file is part of solidity.
#
# solidity is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# solidity is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with solidity. If not, see <http://www.gnu.org/licenses/>
#
# (c) 2016 solidity contributors.
#------------------------------------------------------------------------------
set -ev
# We need git for extracting the commit hash
apt-get update
apt-get -y install git-core
export WORKSPACE=/src
# Boost
echo -en 'travis_fold:start:compiling_boost\\r'
cd "$WORKSPACE"/boost_1_57_0
# if b2 exists, it is a fresh checkout, otherwise it comes from the cache
# and is already compiled
test -e b2 && (
sed -i 's|using gcc ;|using gcc : : /usr/local/bin/em++ ;|g' ./project-config.jam
sed -i 's|$(archiver\[1\])|/usr/local/bin/emar|g' ./tools/build/src/tools/gcc.jam
sed -i 's|$(ranlib\[1\])|/usr/local/bin/emranlib|g' ./tools/build/src/tools/gcc.jam
./b2 link=static variant=release threading=single runtime-link=static \
thread system regex date_time chrono filesystem unit_test_framework program_options random
find . -name 'libboost*.a' -exec cp {} . \;
rm -rf b2 libs doc tools more bin.v2 status
)
echo -en 'travis_fold:end:compiling_boost\\r'
# Build dependent components and solidity itself
echo -en 'travis_fold:start:compiling_solidity\\r'
cd $WORKSPACE
mkdir -p build
cd build
emcmake cmake \
-DCMAKE_BUILD_TYPE=Release \
-DEMSCRIPTEN=1 \
-DBoost_FOUND=1 \
-DBoost_USE_STATIC_LIBS=1 \
-DBoost_USE_STATIC_RUNTIME=1 \
-DBoost_INCLUDE_DIR="$WORKSPACE"/boost_1_57_0/ \
-DBoost_CHRONO_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_chrono.a \
-DBoost_CHRONO_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_chrono.a \
-DBoost_DATE_TIME_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_date_time.a \
-DBoost_DATE_TIME_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_date_time.a \
-DBoost_FILESYSTEM_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_filesystem.a \
-DBoost_FILESYSTEM_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_filesystem.a \
-DBoost_PROGRAM_OPTIONS_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_program_options.a \
-DBoost_PROGRAM_OPTIONS_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_program_options.a \
-DBoost_RANDOM_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_random.a \
-DBoost_RANDOM_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_random.a \
-DBoost_REGEX_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_regex.a \
-DBoost_REGEX_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_regex.a \
-DBoost_SYSTEM_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_system.a \
-DBoost_SYSTEM_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_system.a \
-DBoost_THREAD_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_thread.a \
-DBoost_THREAD_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_thread.a \
-DBoost_UNIT_TEST_FRAMEWORK_LIBRARY="$WORKSPACE"/boost_1_57_0/libboost_unit_test_framework.a \
-DBoost_UNIT_TEST_FRAMEWORK_LIBRARIES="$WORKSPACE"/boost_1_57_0/libboost_unit_test_framework.a \
-DDev_DEVCORE_LIBRARY="$WORKSPACE"/solidity/build/libdevcore/libsoldevcore.a \
-DEth_EVMASM_LIBRARY="$WORKSPACE"/solidity/build/libevmasm/libsolevmasm.a \
-DETH_STATIC=1 -DTESTS=0 \
..
emmake make -j 4
cd ..
cp build/solc/soljson.js ./
mkdir -p upload
cp soljson.js upload/
OUTPUT_SIZE=`ls -la build/solc/soljson.js`
echo "Emscripten output size: ${OUTPUT_SIZE}"
echo -en 'travis_fold:end:compiling_solidity\\r'
|
ruchevits/solidity
|
scripts/travis-emscripten/build_emscripten.sh
|
Shell
|
mit
| 4,362 |
# Set window root path. Default is `$session_root`.
# Must be called before `new_window`.
#window_root "~/Projects/{{WINDOW_NAME}}"
# Create new window. If no argument is given, window name will be based on
# layout file name.
new_window "LeadPages"
# Split window into panes.
split_v 20
split_h 66
select_pane 2
split_h 50
# Run commands.
# run_cmd "ssh vague" 2 # runs in active pane
# run_cmd "ssh vague" 3 # runs in pane 1
# run_cmd "hop modules" 1 # runs in pane 2
run_cmd "hop Projects" 0 # runs in pane 2
run_cmd "hop Projects" 1 # runs in pane 2
run_cmd "hop Projects" 2 # runs in pane 2
run_cmd "hop Projects" 3 # runs in pane 2
# Paste text
#send_keys "top" # paste into active pane
#send_keys "date" 1 # paste into pane 1
# Set active pane.
select_pane 0
|
truetone/tmuxifier
|
layouts/lp.window.sh
|
Shell
|
mit
| 787 |
set -xe
cd groeneboekje2015
SPELL_VERSION=`dpkg -l|grep libaspell|grep -v dev|awk '{print $3}'`
DICT_VERSION=`dpkg -l|grep aspell-nl|awk '{print $3}'`
DESTINATION=aspell\_$SPELL_VERSION\_nl\_$DICT_VERSION
if [ ! -e ../$DESTINATION ]
then
mkdir ../$DESTINATION
fi
if [ -e groeneboekje2015_lemmas.txt ]
then
../check_hunspell.py verifyaspell ../$DESTINATION groeneboekje2015_lemmas.txt
else
echo Missing word list groeneboekje2015_lemmas.txt
fi
if [ -e groeneboekje2015_flexies.txt ]
then
../check_hunspell.py verifyaspell ../$DESTINATION groeneboekje2015_flexies.txt
else
echo Missing word list groeneboekje2015_flexies.txt
fi
cd ..
|
OpenTaal/compare-spell-checking
|
check_aspell_groeneboekje2015.sh
|
Shell
|
mit
| 652 |
#!/bin/bash
<%!
import common.project_utils as project
%>
SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )";
SCRIPT_DIR="$( readlink -f $SCRIPT_DIR )";
cd "$SCRIPT_DIR";
SERVER_NAME="${project.get_server_name()}";
SERVERD_NAME="$SERVER_NAME";
SERVER_FULL_NAME="${project.get_server_full_name()}";
SERVER_BUS_ID=${hex(project.get_server_id())};
export PROJECT_INSTALL_DIR=$(cd ${project_install_prefix} && pwd);
source "$PROJECT_INSTALL_DIR/tools/script/common/common.sh";
if [ ! -e "$SERVERD_NAME" ]; then
SERVERD_NAME="${project.get_server_name()}d";
fi
if [ ! -e "$SERVERD_NAME" ]; then
ErrorMsg "Executable $SERVER_NAME or $SERVERD_NAME not found, run $@ failed";
exit 1;
fi
SERVER_PID_FILE_NAME="$SERVER_FULL_NAME.pid";
export LD_LIBRARY_PATH=$PROJECT_INSTALL_DIR/lib:$PROJECT_INSTALL_DIR/tools/shared:$LD_LIBRARY_PATH ;
PROFILE_DIR="$PROJECT_INSTALL_DIR/profile";
export GCOV_PREFIX="$PROFILE_DIR/gcov";
export GCOV_PREFIX_STRIP=16 ;
mkdir -p "$GCOV_PREFIX";
<%
import os
server_preload_scripts=[]
if project.get_global_option_bool('jemalloc', 'malloc', False):
jemalloc_profile_dir = str(project.get_global_option('jemalloc', 'profile_dir', 'profile'))
jemalloc_profile_dir = os.path.join('$PROJECT_INSTALL_DIR', jemalloc_profile_dir)
server_preload_scripts.append('mkdir -p "{0}" ;'.format(jemalloc_profile_dir))
server_profile_dir = os.path.join(jemalloc_profile_dir, project.get_server_name())
jemalloc_path = '$PROJECT_INSTALL_DIR/tools/shared/libjemalloc.so'
jemalloc_options = ''
jemalloc_heap_check = int(project.get_global_option('jemalloc', 'heap_check', 0))
if jemalloc_heap_check > 0:
jemalloc_options = 'prof_leak:true,lg_prof_sample:{0}'.format(jemalloc_heap_check)
jemalloc_heap_profile = str(project.get_global_option('jemalloc', 'heap_profile', ''))
if len(jemalloc_heap_profile) > 0:
jemalloc_options = jemalloc_options + ',prof:true,prof_prefix:{0}'.format(os.path.join(server_profile_dir, jemalloc_heap_profile))
server_preload_scripts.append('mkdir -p "{0}" ;'.format(server_profile_dir))
jemalloc_other_options = project.get_global_option('jemalloc', 'other_malloc_conf', '')
if len(jemalloc_other_options) > 0:
jemalloc_options = jemalloc_options + ',' + jemalloc_other_options
if jemalloc_options[0:1] == ',':
jemalloc_options = jemalloc_options[1:]
server_preload_scripts.append('export MALLOC_CONF="{0}" ;'.format(jemalloc_options))
server_preload_scripts.append('if [ -e "{0}" ]; then'.format(jemalloc_path))
server_preload_scripts.append(' export LD_PRELOAD="{0}" ;'.format(jemalloc_path))
server_preload_scripts.append('fi')
elif project.get_global_option_bool('gperftools', 'malloc', False):
gperftools_profile_dir = str(project.get_global_option('gperftools', 'profile_dir', 'profile'))
gperftools_profile_dir = os.path.join('$PROJECT_INSTALL_DIR', gperftools_profile_dir)
server_preload_scripts.append('mkdir -p "{0}" ;'.format(gperftools_profile_dir))
server_profile_dir = os.path.join(gperftools_profile_dir, project.get_server_name())
tcmalloc_path = '$PROJECT_INSTALL_DIR/tools/shared/libtcmalloc_minimal.so'
gperftools_heap_check = str(project.get_global_option('gperftools', 'heap_check', ''))
gperftools_heap_profile = str(project.get_global_option('gperftools', 'heap_profile', ''))
gperftools_cpu_profile = str(project.get_global_option('gperftools', 'cpu_profile', ''))
if len(gperftools_heap_check) > 0 or len(gperftools_heap_profile) > 0:
tcmalloc_path = '$PROJECT_INSTALL_DIR/tools/shared/libtcmalloc.so'
if len(gperftools_cpu_profile) > 0:
tcmalloc_path = '$PROJECT_INSTALL_DIR/tools/shared/libtcmalloc_and_profiler.so'
server_preload_scripts.append('mkdir -p "{0}" ;'.format(server_profile_dir))
server_preload_scripts.append('export CPUPROFILE="{0}" ;'.format(os.path.join(server_profile_dir, gperftools_cpu_profile)))
if len(gperftools_heap_profile) > 0:
server_preload_scripts.append('mkdir -p "{0}" ;'.format(server_profile_dir))
server_preload_scripts.append('export HEAPPROFILE="{0}" ;'.format(os.path.join(server_profile_dir, gperftools_heap_profile)))
if len(gperftools_heap_check) > 0:
server_preload_scripts.append('export HEAPCHECK={0} ;'.format(gperftools_heap_check))
server_preload_scripts.append('if [ -e "{0}" ]; then'.format(tcmalloc_path))
server_preload_scripts.append(' export LD_PRELOAD="{0}" ;'.format(tcmalloc_path))
server_preload_scripts.append('fi')
%>
${os.linesep.join(server_preload_scripts)}
|
atframework/atsf4g-co
|
install/tools/script/helper/template/script/common.template.sh
|
Shell
|
mit
| 4,631 |
#!/bin/bash
curl -fsSL https://get.docker.com/ | sh
# selinux
mkdir -p /data
semanage fcontext -a -t var_t /data
restorecon -v /data
# docker config
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://registry.docker-cn.com"],
"data-root": "/data/docker/lib/docker/"
}
EOF
# proxy
mkdir -p /etc/systemd/system/docker.service.d
sudo tee /etc/systemd/system/docker.service.d/proxy.conf <<-'EOF'
[Service]
Environment="HTTP_PROXY=http://127.0.0.1:8118/" "HTTPS_PROXY=http://127.0.0.1:8118/" "NO_PROXY=localhost,127.0.0.1,*.docker.io"
EOF
#systemctl show --property Environment docker
# start
systemctl enable docker.service
systemctl start docker.service
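# Verify the daemon picked up the mirror/data-root settings (illustrative):
#   docker info | grep -iE 'registry|root dir'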
|
tudyzhb/linux-bootstrap
|
centos7/sh/docker_shell.sh
|
Shell
|
mit
| 705 |
#!/bin/sh
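# Report which repos under $1 need a commit (dirty working tree) or a
# push/pull (master and origin/master point at different commits).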
git_dir=$(cd "$1" && pwd) # absolute path, so the per-repo cd below stays valid
sub_dirs=`ls $git_dir | grep -v "\."`
for i in $sub_dirs
do
cd $git_dir/$i
local_hash=`git rev-parse --verify master`
remote_hash=`git rev-parse --verify origin/master`
need_commit=`git status --short`
need_push=`echo $local_hash | grep -v $remote_hash`
if test -n "$need_commit"; then
echo $i needs commit
elif test -n "$need_push"; then
echo $i needs a push/pull
fi
done
|
natemara/git_check
|
git_check.sh
|
Shell
|
mit
| 412 |
#!/bin/bash
# Exercise all the analysis
set -e
FILES="files/SYSTEM.PASCAL files/SYSTEM.STARTUP"
for m in 0 1 2 3 4 5 6 7
do
tools/list_pcode.py -m $m $FILES > /dev/null
done
|
laanwj/sundog
|
test_tools.sh
|
Shell
|
mit
| 448 |
#!/bin/bash
patch -Np1 -i ../kbd-2.0.3-backspace-1.patch
sed -i 's/\(RESIZECONS_PROGS=\)yes/\1no/g' configure
sed -i 's/resizecons.8 //' docs/man/man8/Makefile.in
PKG_CONFIG_PATH=/tools/lib/pkgconfig ./configure --prefix=/usr --disable-vlock
make
make install
mkdir -v /usr/share/doc/kbd-2.0.3
cp -R -v docs/doc/* /usr/share/doc/kbd-2.0.3
|
nmuzychuk/lfs-build
|
build_lfs/install/kbd-2.0.3.sh
|
Shell
|
mit
| 345 |
#!/bin/bash
CLUSTER_VERSION=`pg_lsclusters -h|awk '{print $1}'`
set -e
set -x
sudo -u postgres service postgresql stop
echo "Dropping current cluster"
sudo -u postgres pg_dropcluster $CLUSTER_VERSION main
echo "Creating new cluster with UTF8 encoding"
sudo -u postgres pg_createcluster --locale=$LC_ALL $CLUSTER_VERSION main
# Allow connections from anywhere.
sed -i -e"s/^#listen_addresses =.*$/listen_addresses = '*'/" /etc/postgresql/$CLUSTER_VERSION/main/postgresql.conf
echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/$CLUSTER_VERSION/main/pg_hba.conf
sudo -u postgres service postgresql start
sudo -u postgres psql -q <<- EOF
DROP ROLE IF EXISTS admin;
CREATE ROLE admin WITH ENCRYPTED PASSWORD 'admin';
ALTER ROLE admin WITH SUPERUSER;
ALTER ROLE admin WITH LOGIN;
EOF
sudo -u postgres service postgresql stop
# Configure the database to use our data dir.
sed -i -e"s|data_directory =.*$|data_directory = '$DATA_DIR'|" /etc/postgresql/$CLUSTER_VERSION/main/postgresql.conf
chown -R postgres:postgres $DATA_DIR
|
mpeterson/docker-postgresql
|
prepare_postgres.sh
|
Shell
|
mit
| 1,048 |
#!/bin/bash
if [ "x${SERVER_ADDR}" == "x" ]; then
echo "Please specify the SERVER_ADDR"
exit 1
fi
sed -i s/"name: \"\""/"name: \"${SERVER_ADDR}\""/ /gearmanui/config.yml
sed -i s/"addr: \"\""/"addr: \"${SERVER_ADDR}:4730\""/ /gearmanui/config.yml
apache2ctl -D FOREGROUND
|
pandeyanshuman/docker-gearman-ui
|
run_gearmanui.sh
|
Shell
|
mit
| 282 |
#!/bin/sh
NOW=`date +%Y%m%d`
BACKUP_DIR="vagrantbox_backup"
#VM_NAME="${VM_NAME}"
VM_NAME="redmine"
BOX_NAME="ubuntu-14.04-${VM_NAME}.${NOW}.box"
if [ ! -d "${BACKUP_DIR}" ]; then mkdir ${BACKUP_DIR}; fi
ST=`vagrant status | grep -w ${VM_NAME} | awk '{print $2}'`
#echo $ST
case $ST in
poweroff) vagrant package ${VM_NAME} --output ${BACKUP_DIR}/${BOX_NAME};;
running) vagrant halt ${VM_NAME}
vagrant package ${VM_NAME} --output ${BACKUP_DIR}/${BOX_NAME}
vagrant up ${VM_NAME};;
*) echo "check vagrant status"
esac
|
ftakao2007/vagrant
|
script/vagrantbox_backup.sh
|
Shell
|
mit
| 559 |
#!/bin/bash
set -e
source "$HOME/.rvm/scripts/rvm"
local_folder=$(pwd)
RESULT=0
for test_app in $(ls test_apps)
do
if [[ $RESULT == 0 ]]
then
echo $test_app
cd $local_folder/test_apps/$test_app
RUBY_VERSION=$(cat RUBY_VERSION)
rvm use $RUBY_VERSION || rvm install $RUBY_VERSION --disable-binary && rvm use $RUBY_VERSION
gem install bundler
bundle --version | awk '{print $3}' > BUNDLER_VERSION
export BUNDLE_GEMFILE="$local_folder/test_apps/$test_app/Gemfile"
bundle install
bundle exec rake db:create:all
bundle exec rake db:migrate
bundle exec rake
RESULT=$(($RESULT + $?))
fi
done
if [ $RESULT == 0 ]
then
exit 0
else
exit 1
fi
|
webhoernchen/active_sql
|
script/run_tests.sh
|
Shell
|
mit
| 695 |
#!/usr/bin/env bash
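# Dev-oriented flags: --reload restarts workers on code changes and
# --max-requests 1 recycles a worker after every request.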
gunicorn --reload --max-requests 1 -b 0.0.0.0:$PORT app:app
|
jongha/stock-ai
|
start.sh
|
Shell
|
mit
| 79 |
#!/bin/bash
# ============================================================
# chmod u+x make.sh
# ./make.sh
# ============================================================
# ============================================================
# Configuration:
# ============================================================
#
# PROFILE:
# AWS Profile to be used
#
# LBUCKET:
# Name of the AWS S3 Bucket to be used to upload
# Lambda Deployment Packages.
#
# SWSID:
# SWS Instance Identifier
#
# STACKNAME:
# Name of the CloudFormation Stack to be used
#
# ============================================================
PROFILE="your_aws_profile"
LBUCKET="sws.lambdas"
SWSID="swsdev"
STACKNAME="SWSStack"
# ============================================================
printf "\n"
printf "Make for SWSAPI:\n"
printf "========================================\n"
printf "AWS Profile : $PROFILE\n"
printf "Lambdas Bucket Name : $LBUCKET\n"
printf "SWSID : $SWSID\n"
printf "Stack Name : $STACKNAME\n"
# ============================================================
printf "\n"
printf "========================================\n"
printf "Preparing Lambda Packages:\n"
printf "========================================\n"
printf "directors:\n"
# ============================================================
rm -f directors.zip
if [ $? != 0 ] ; then
printf "!> FAILED to remove.\n"
exit 1 # note: $? here would be printf's exit status, so exit 1 explicitly
else
printf "\tRemoved\n"
fi
zip directors.zip ./directors.py
if [ $? != 0 ] ; then
printf "!> FAILED to zip.\n"
exit 1
else
printf "\tOK\n"
fi
printf "agents:\n"
# ============================================================
rm -f agents.zip
if [ $? != 0 ] ; then
printf "!> FAILED to remove.\n"
exit 1
else
printf "\tRemoved\n"
fi
zip agents.zip ./agents.py
if [ $? != 0 ] ; then
printf "!> FAILED to zip.\n"
exit 1
else
printf "\tOK\n"
fi
# ============================================================
printf "\n"
printf "========================================\n"
printf "Packaging SAM CloudFormation Templates:\n"
printf "========================================\n"
printf "sws_cfn:\n"
aws cloudformation package \
--template-file ./sws_cfn.yaml \
--s3-bucket $LBUCKET \
--output-template-file ./sws_cfn_packaged.yaml \
--profile $PROFILE
if [ $? != 0 ] ; then
printf "!> FAILED.\n"
exit 1
else
printf "\tOK\n"
fi
# ============================================================
printf "\n"
printf "========================================\n"
printf "Deploying CloudFormation ChangeSet:\n"
printf "========================================\n"
printf "sws_cfn_packaged:\n"
aws cloudformation deploy \
--profile $PROFILE \
--template-file ./sws_cfn_packaged.yaml \
--stack-name $STACKNAME \
--capabilities CAPABILITY_NAMED_IAM \
--parameter-overrides SWSID=$SWSID
if [ $? != 0 ] ; then
printf "!> FAILED.\n"
exit 1
else
printf "\tOK\n"
fi
# ============================================================
printf "\n"
printf "========================================\n"
printf "Retrieving Stack Outputs:\n"
printf "========================================\n"
# aws cloudformation describe-stacks \
# --stack-name $STACKNAME \
# --no-paginate \
# --output text \
# --query 'Stacks[0].Outputs[?OutputKey==`SWSRestApiId`]'
RESTAPIID=$(aws cloudformation describe-stacks --stack-name $STACKNAME --no-paginate --output text --query 'Stacks[0].Outputs[?OutputKey==`SWSRestApiId`]' | cut -f3)
if [ $? != 0 ] ; then
printf "!> FAILED.\n"
exit 1
else
printf "\tOK\n"
fi
if [ "${RESTAPIID}" == "" ]; then
printf "!> FAILED: Could not find SWSRestApiId Stack Output.\n"
exit 1
else
printf "\tSWSRestApiId: $RESTAPIID\n"
fi;
# ============================================================
printf "\n"
printf "========================================\n"
printf "Requesting Deployment:\n"
printf "========================================\n"
aws apigateway create-deployment \
--rest-api-id $RESTAPIID \
--stage-name $SWSID \
--stage-description "SWS REST API Stage" \
--description "Stage Deployment by make.sh" \
--no-cache-cluster-enabled
#--cache-cluster-size <value>
#--variables <value>
#--cli-input-json <value>
#--generate-cli-skeleton <value>
if [ $? != 0 ] ; then
printf "!> FAILED.\n"
exit 1
else
printf "\tOK\n"
fi
# ============================================================
printf "\n"
printf "========================================\n"
printf "Requesting JS SDK:\n"
printf "========================================\n"
aws apigateway get-sdk \
--rest-api-id $RESTAPIID \
--stage-name $SWSID \
--sdk-type javascript \
./aws_js_sdk.zip
if [ $? != 0 ] ; then
printf "!> FAILED.\n"
exit 1
else
printf "\tOK\n"
fi
# ============================================================
printf "\n"
printf "========================================\n"
printf "All Done.\n"
exit 0
|
FrancescoRizzi/AWSomesauce
|
articles/BAS1-uws/make.sh
|
Shell
|
mit
| 4,878 |
#!/usr/bin/env bash
set -e
"$(dirname "$0")/npm_install.sh" vscode-json-language-server vscode-langservers-extracted
|
ruchee/vimrc
|
vimfiles/bundle/lsp-settings/installer/install-vscode-json-language-server.sh
|
Shell
|
mit
| 119 |
#!/bin/sh
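# Compile the main program plus every controller into one executable
# (-x: build executable, -free: free source format).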
cobc -x -free cow.cbl `ls -d controllers/*` -o the.cow
|
azac/cobol-on-wheelchair
|
downhill.sh
|
Shell
|
mit
| 66 |
./compile-and-run-recursive.sh 50 5 4 7
|
antunflas/Computing-the-Optimal-Threshold-for-Q-gram-filters
|
main-recursive/test-recursive.sh
|
Shell
|
mit
| 40 |
#!/bin/sh
DIR=`pwd`
VERSION=2_0_PR3
rm -rf /tmp/sandbox
mkdir /tmp/sandbox
cp -r hello /tmp/sandbox/
cp -r src /tmp/sandbox/
cp -r web /tmp/sandbox/
cp -r README /tmp/sandbox/
cp -r LICENSE /tmp/sandbox/
cd /tmp/sandbox
perl -p -i -e "s#/../vendor#/vendor#" src/autoload.php
sudo rm -rf hello/cache/* hello/logs/* .git*
chmod 777 hello/cache hello/logs
cd ..
# avoid the creation of ._* files
export COPY_EXTENDED_ATTRIBUTES_DISABLE=true
export COPYFILE_DISABLE=true
tar zcpf $DIR/sandbox_$VERSION.tgz sandbox
sudo rm -f $DIR/sandbox_$VERSION.zip
zip -rq $DIR/sandbox_$VERSION.zip sandbox
|
noelg/symfony-news
|
create_sandbox.sh
|
Shell
|
mit
| 590 |
if ! which rvm > /dev/null; then
gpg --keyserver hkp://pool.sks-keyservers.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
curl -sSL https://get.rvm.io | bash
fi
|
jmeridth/dotfiles
|
install_rvm.sh
|
Shell
|
mit
| 220 |
#!/bin/sh
VERSION=4.9.2
tar --files-from=file.list -xjvf ../gcc-$VERSION.tar.bz2
mv gcc-$VERSION gcc-$VERSION-orig
cp -rf ./gcc-$VERSION-new ./gcc-$VERSION
diff -b --unified -Nr gcc-$VERSION-orig gcc-$VERSION > gcc-$VERSION-lpc17xx-multilib.patch
mv gcc-$VERSION-lpc17xx-multilib.patch ../../patches
rm -rf ./gcc-$VERSION
rm -rf ./gcc-$VERSION-orig
|
radix-platform/toolchains
|
sources/GNU/gcc/gcc-4.9.2/create-4.9.2-lpc17xx-multilib-patch/create.patch.sh
|
Shell
|
mit
| 357 |
#!/bin/bash
# Make brew work on a multi-user machine:
# 1. create all users
# 2. create an administrators group (here: brew)
# 3. add all users to the administrators group
# 4. run:
sudo chgrp -R brew $(brew --prefix)/*
sudo chmod -R g+w $(brew --prefix)/*
|
jturolla/dotfiles
|
users.sh
|
Shell
|
mit
| 208 |
#!/bin/bash
set -e
sudo apt-get update
local_directory="`dirname \"$0\"`"
date_of_installation_start=$(date +"%s")
for file in 01_install_nodejs.sh 02_clone_repository.sh 03_install_packages.sh 04_install_postgresql.sh 05_install_modules.sh 06_initialize.sh 07_testdata.sh
do
"$local_directory/$file"
done
date_of_installation_end=$(date +"%s")
installation_duration=$(($date_of_installation_end-$date_of_installation_start))
echo "Installation took $(($installation_duration / 60))m $(($installation_duration % 60))s."
|
niccokunzmann/cp-automated-development
|
setup-zen/install.sh
|
Shell
|
mit
| 528 |
#! /bin/bash
### Load a gt4gemstone from .gs files into a standard stone
### Exits with 0 if success, 1 if failed
set -e
export GT4GEMSTONE_RELEASE_HOME="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
## Topaz refuses to exit from script if input is stdin, so redirect from /dev/zero
## Run topaz inside the if condition so that, with set -e, a failure still
## reaches the error report below instead of aborting the script first.
if topaz -l -I ${GT4GEMSTONE_RELEASE_HOME}/loginSystemUser.topaz -S ${GT4GEMSTONE_RELEASE_HOME}/inputRelease.topaz < /dev/zero
then
exit 0
else
echo !!!!!!!!!!!
echo LOAD FAILED for gt4gemstone
echo !!!!!!!!!!!
exit 1
fi
|
feenkcom/gt4gemstone
|
scripts/release/inputRelease.sh
|
Shell
|
mit
| 572 |
#!/bin/bash
$PROJECT_HOME/env/bin/gunicorn \
-b 0.0.0.0:8888 \
--error-logfile=- \
--access-logfile=- \
--access-logformat="%(h)s %(l)s %(u)s %(t)s '%(r)s' %(s)s %(b)s" \
--keep-alive 5 \
--workers 4 \
--worker-class "gevent" \
parser_app:app 2>&1
|
qiqiustb/slack-challenge
|
html_parser/run.sh
|
Shell
|
mit
| 281 |
VARIANT=musl
. "$IMAGEDIR/config.sh"
. "$INCLUDE/void.sh"
generate-void
|
vpsfreecz/build-vpsfree-templates
|
images/void-musl/build.sh
|
Shell
|
mit
| 72 |
#!/bin/bash
# Create a date stamped journal file
# (UTC fits my day/night cycle better)
journalfile=$(date --utc +%Y%m%d-%u.md);
if [[ ! -f $journalfile ]]; then
# Create journal file with today's date as Heading 1
echo "# "$(date "+%A, %e %B, %Y") >> $journalfile;
# Add to git repo
git add $journalfile;
else
echo "File already exists: $journalfile";
fi;
vim $journalfile;
git commit -m "Changes in journal file $journalfile" $journalfile;
git push origin master;
|
samuell/mdnote
|
editnewjournalfile.sh
|
Shell
|
mit
| 474 |
export PATH="$HOME/.rbenv/shims:/.rbenv/bin:$PATH"
|
coxy/dotfiles
|
ruby/path.zsh
|
Shell
|
mit
| 50 |
#!/bin/sh
VERSION=0.9.33.2
tar --files-from=file.list -xjvf ../uClibc-$VERSION.tar.bz2
mv uClibc-$VERSION uClibc-$VERSION-orig
cp -rf ./uClibc-$VERSION-new ./uClibc-$VERSION
diff -b --unified -Nr uClibc-$VERSION-orig uClibc-$VERSION > uClibc-$VERSION-out-of-tree.patch
mv uClibc-$VERSION-out-of-tree.patch ../patches
rm -rf ./uClibc-$VERSION
rm -rf ./uClibc-$VERSION-orig
|
radix-platform/toolchains
|
sources/uClinux/uClibc/create-0.9.33.2-out-of-tree-patch/create.patch.sh
|
Shell
|
mit
| 380 |
#!/bin/bash
python prettyqr.py "http://github.com/davidszotten" --logo=example/logo.svg --logo-raster=example/logo.png --color "#ff6600" --output example/output.svg
|
davidszotten/prettyqr
|
example.sh
|
Shell
|
mit
| 166 |
#!/bin/bash
BASE_DIR=`dirname $0`
echo ""
echo "This script starts all the services needed to develop the"
echo "motorsports API locally."
echo "-------------------------------------------------------------------"
# Default parameters
#
HOST="127.0.0.1"
PORT="5000"
PROCFILE="bin/procfiles/Procfile.dev"
# Custom die function (defined before the option loop below, which calls it).
#
die() { echo >&2 -e "\nRUN ERROR: $@\n"; exit 1; }
# Parse the command line flags.
#
while getopts ":np:f:" opt; do
case $opt in
n)
# Get the IP address on a mac. Only works on a mac.
#
HOST=`ifconfig | grep -E 'inet.[0-9]' | grep -v '127.0.0.1' | awk '{ print $2}'|head -n1`
;;
p)
# Set the port
#
PORT=${OPTARG}
;;
f)
# Set the Procfile
#
PROCFILE=${OPTARG}
if [[ ! -e "$PROCFILE" ]]; then
die "...your specified $PROCFILE does not exist"
fi
;;
\?)
echo "Invalid option: -$OPTARG" >&2
;;
esac
done
# Check for required programs: Postgres.app, honcho
#
HONCHO=$(command -v honcho || command -v foreman || die "...Error: honcho/foreman is not in your path! Are you in the right virtualenv?")
POSTGRES="/Applications/Postgres.app"
if [ ! -d "$POSTGRES" ]; then
die "...Error: cannot find Postgres.app"
fi
# Check for .env file
#
if [[ ! -e "$BASE_DIR/../.env" ]]; then
die "...You need to have a .env file at $BASE_DIR/../.env"
fi
# Print config
#
echo ""
echo "Configuration:"
echo -e "\tPROCFILE: $PROCFILE"
echo -e "\tHONCO: $HONCO"
echo -e "\tPOSTGRES: $POSTGRES"
echo -e "\tHOST: $HOST"
echo -e "\tPORT: $PORT"
echo -e "\tPATH: $PATH"
echo ""
echo "-------------------------------------------------------------------"
# Start Postgres, which daemonizes, so cannot be used
# with foreman/honcho
#
open $POSTGRES
# Start the other processes. See bin/Procfile.dev
#
HOST=$HOST PORT=$PORT $HONCHO start -f $PROCFILE
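# Examples (illustrative):
#   bin/devserver.sh              # 127.0.0.1:5000 with the default Procfile
#   bin/devserver.sh -n -p 8000   # bind the Mac's LAN address on port 8000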
|
pitrho/motorsports-historic-data-api
|
bin/devserver.sh
|
Shell
|
mit
| 1,892 |
#!/bin/bash
ps aux |grep node|grep cn_app|awk '{print $2}' |xargs kill
cd /home/joker/earn
nohup node cn_app 1>> nohup.cn.out 2>&1 &
ps aux|grep tail |grep 'nohup.cn.out'|awk '{print $2}' |xargs kill
tail -f nohup.cn.out &
|
wusuowe/earn
|
start_cn.sh
|
Shell
|
mit
| 224 |
#!/usr/bin/env bash
#return value visualisation
PS1="\$? \$(if [[ \$? == 0 ]]; then echo \"\[\033[0;32m\];)\"; else echo \"\[\033[0;31m\];(\"; fi)\[\033[00m\] : "
# Zero is a green smiley and non-zero a red one. So your prompt will smile if the last operation was successful.
|
stephaneAG/Bash_tests
|
promptHacks/returnValueOfLastCmd.sh
|
Shell
|
mit
| 277 |
#!/bin/bash
INSTALL_ROOT=$HOME
DOTFILES_ROOT=$INSTALL_ROOT/dotfiles
VIM_ROOT=$INSTALL_ROOT/.vim
if [ -z $OSTYPE ] ; then
OSTYPE="$(uname -o)"
fi
case "$OSTYPE" in
darwin*) OS=OSX ;;
linux*) OS=LINUX ;;
Linux*) OS=LINUX ;;
*) OS=unknown ;;
esac
echo "Install prerequisites for ${OS}"
if [ "${OS}" = "LINUX" ] ; then
case "$(uname -o)" in
Android*) OS_TYPE=ANDROID ;;
*) OS_TYPE=LINUX ;;
esac
echo "OS is ${OS} and OS type is ${OS_TYPE}"
if [ "${OS_TYPE}" = "LINUX" ] ; then
LINUX_ID=`awk -F '=' '/^ID=/ { print $2 }' /etc/os-release | tr -d \"`
if [ "${LINUX_ID}" = "centos" ] ; then
echo "Install centos packages"
# Note: Uncomment when required latest packages
#sudo yum update
# Minimalistic package set for centos
sudo yum -y install file mc vim
# Note: uncomment for SF development
#sudo yum groupinstall -y 'Development Tools'
#sudo yum -y install libcap-devel texi2html texinfo
# Download source RPM zsh 5.5.1 from the Fedora Core repo
#curl http://dl.fedoraproject.org/pub/fedora/linux/updates/28/Everything/SRPMS/Packages/z/zsh-5.5.1-1.fc28.src.rpm --output ~/zsh-5.5.1-1.fc28.src.rpm
#rpmbuild --rebuild ~/zsh-5.5.1-1.fc28.src.rpm
#sudo rpm -ivh ~/rpmbuild/RPMS/x86_64/zsh-5.5.1-1.el7.x86_64.rpm
#rm -rf ~/rpmbuild
#rm ~/zsh-5.5.1-1.fc28.src.rpm
curl http://mirror.ghettoforge.org/distributions/gf/gf-release-latest.gf.el7.noarch.rpm -o gf-release-latest.gf.el7.noarch.rpm
sudo rpm -Uvh gf-release*rpm
rm gf-release-latest.gf.el7.noarch.rpm
sudo rpm -Uvh http://dl.fedoraproject.org/pub/epel/7/x86_64/Packages/e/epel-release-7-11.noarch.rpm
sudo yum -y install epel-release
sudo rpm -Uvh https://centos7.iuscommunity.org/ius-release.rpm
sudo yum -y --enablerepo=gf-plus install tmux zsh git2u
fi
if [ "${LINUX_ID}" = "ubuntu" ] ; then
echo "Install ubuntu packages"
sudo apt-get update
# Minimalistic package set for ubuntu
sudo apt-get -y install git zsh build-essential file tmux mc vim
fi
if [ "${LINUX_ID}" = "alpine" ] ; then
echo "Install alpine packages"
apk update
apk add zsh git bash zsh zsh-vcs tmux mc vim
fi
fi
if [ "${OS_TYPE}" = "ANDROID" ] ; then
echo "Install android Termux packages"
# Minimalistic package set for Termux
pkg install zsh file git mc ncurses-utils openssh tmux vim
fi
fi
if [ "${OS}" = "OSX" ] ; then
echo "Install OSX Homebrew"
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
echo "Install MacOS packages"
# Minimalistic package set for MacOS X
brew install git
brew install zsh
brew install peco
brew install vim
brew cask install font-jetbrains-mono
fi
# Dotfiles itself
git clone https://github.com/alxzoomer/dotfiles.git $DOTFILES_ROOT
# Spaceship theme for Oh my ZSH
git clone https://github.com/denysdovhan/spaceship-prompt.git $DOTFILES_ROOT/shell/custom/themes/spaceship-prompt
# Powerlevel9k theme for Oh my ZSH
git clone https://github.com/bhilburn/powerlevel9k.git $DOTFILES_ROOT/shell/custom/themes/powerlevel9k
# Powerlevel10k theme for Oh my ZSH
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git $DOTFILES_ROOT/shell/custom/themes/powerlevel10k
# ZSH autosuggestion custom plugin
git clone https://github.com/zsh-users/zsh-autosuggestions.git $DOTFILES_ROOT/shell/custom/plugins/zsh-autosuggestions
# ZSH zsh-syntax-highlighting
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git $DOTFILES_ROOT/shell/custom/plugins/zsh-syntax-highlighting
# ZSH peco history plugin
git clone https://github.com/jimeh/zsh-peco-history.git $DOTFILES_ROOT/shell/custom/plugins/zsh-peco-history
# VIM Vundle plugin
git clone https://github.com/VundleVim/Vundle.vim.git $VIM_ROOT/bundle/Vundle.vim
# Install Oh my ZSH
git clone https://github.com/robbyrussell/oh-my-zsh.git $INSTALL_ROOT/.oh-my-zsh
$(which bash) $DOTFILES_ROOT/setup.sh
# Install VIM Plugins and exit when Vundle.vim is in default directory
if [ -d "$HOME/.vim/bundle/Vundle.vim" ] ; then
echo "Installing VIM plugins"
vim -T dumb -n -c "set nomore" -c "PluginInstall" -c "qall"
echo "VIM plugins installed"
fi
# Final step change default shell to ZSH
if [ "${LINUX_ID}" != "alpine" ] ; then
echo "Switch shell to ZSH"
chsh -s $(which zsh)
fi
|
alxzoomer/dotfiles
|
install.sh
|
Shell
|
mit
| 4,423 |
#!/bin/sh
set -ev
Rscript -e "bookdown::render_book('index.Rmd', 'bookdown::gitbook')"
#Rscript -e "bookdown::render_book('index.Rmd', 'bookdown::pdf_book')"
#Rscript -e "bookdown::render_book('index.Rmd', 'bookdown::epub_book')"
|
deynarde/kag-knight-compendium
|
_build.sh
|
Shell
|
cc0-1.0
| 233 |
#!/bin/bash
#
# Configuration of the command line tests
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 2005 Ulf Lamping
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Set WS_SYSTEM to our system type, e.g. Windows, Linux, Darwin
# http://support.microsoft.com/kb/100843
if [ -n "${OS#Windows}" ] ; then
WS_SYSTEM="Windows"
export CYGWIN="$CYGWIN error_start=c:\cygwin\bin\dumper.exe -d %1 %2"
else
WS_SYSTEM=`uname -s`
fi
#
#
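# Guess endianness: "I" is 0x49, and od -to2 prints it as a 16-bit octal
# word whose last digit is 1 on little-endian hosts and 0 on big-endian
# ones; awk exits with that digit as its status, which is tested below.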
ENDIANNESS="little"
echo -n I | od -to2 | awk '{ lastbit = substr($2,6,1); exit lastbit }'
if [ $? -eq 0 ] ; then
ENDIANNESS="big"
fi
# Absolute path to the source tree
SOURCE_DIR="$(cd "$(dirname "$0")" && cd .. && pwd)"
# Absolute path to this test directory (for capture and config files)
TESTS_DIR="$SOURCE_DIR/test"
# Are we allowed to open interfaces or capture on this system?
SKIP_CAPTURE=${SKIP_CAPTURE:-1}
# Override the last two items if we're running Windows
if [ "$WS_SYSTEM" = "Windows" ] ; then
WS_BIN_PATH=${WS_BIN_PATH:-$SOURCE_DIR/wireshark-gtk2}
SKIP_CAPTURE=0
fi
# Path to the Wireshark binaries, default to source dir if unset
WS_BIN_PATH=${WS_BIN_PATH:-$SOURCE_DIR}
# Tweak the following to your liking.
WIRESHARK_GTK=$WS_BIN_PATH/wireshark-gtk
TSHARK=$WS_BIN_PATH/tshark
RAWSHARK=$WS_BIN_PATH/rawshark
CAPINFOS=$WS_BIN_PATH/capinfos
DUMPCAP=$WS_BIN_PATH/dumpcap
# interface with at least a few packets/sec traffic on it
# (e.g. start a web radio to generate some traffic :-)
# an interface index (1-based) should do well for recent devbuilds
if [ "$WS_SYSTEM" = "Windows" -a -z "$TRAFFIC_CAPTURE_IFACE" ] ; then
# Try to fetch the first Ethernet interface.
TRAFFIC_CAPTURE_IFACE=`$TSHARK -D 2>&1 | \
egrep 'Ethernet|Network Connection|VMware|Intel|Realtek' | \
head -1 | cut -c 1`
fi
TRAFFIC_CAPTURE_IFACE=${TRAFFIC_CAPTURE_IFACE:-1}
# time to capture some traffic (in seconds)
# (you may increase this if you get errors caused by very low traffic)
TRAFFIC_CAPTURE_DURATION=15
# the default is to not capture in promiscuous mode
# (this makes known trouble with some Windows WLAN adapters)
# if you need promiscuous mode, comment this line out
TRAFFIC_CAPTURE_PROMISC=-p
# only test capturing from a fifo if we're not on Windows
# and we have a mkfifo. (Windows cygwin has a mkfifo but
# Windows dumpcap & etc use Windows named pipes which
# are different than the cygwin named pipes).
#
if [ "$WS_SYSTEM" != "Windows" ] && which mkfifo &>/dev/null ; then
TEST_FIFO=1
fi
# Tell Wireshark to quit after capturing packets.
export WIRESHARK_QUIT_AFTER_CAPTURE="True"
CAPTURE_DIR="$TESTS_DIR/captures/"
# Figure out if we were built with lua or not so we can skip the lua tests if we
# don't have it.
$TSHARK -v | grep -q "with Lua"
HAVE_LUA=$?
# Display our environment
##printf "\n ------- Info =-----------------\n"
##printf "Syms :$WS_SYSTEM: :$TRAFFIC_CAPTURE_IFACE: :$SKIP_CAPTURE: :$TEST_FIFO:\n"
##
##ls -l $WIRESHARK $TSHARK $DUMPCAP
##ls -l $(which wireshark) $(which tshark) $(which dumpcap)
##printf " ----------------------------------\n\n"
# Editor modelines
#
# Local Variables:
# sh-basic-offset: 8
# tab-width: 8
# indent-tabs-mode: t
# End:
#
# ex: set shiftwidth=8 tabstop=8 noexpandtab:
# :indentSize=8:tabSize=8:noTabs=false:
|
sodzawic/tk
|
test/config.sh
|
Shell
|
gpl-2.0
| 3,983 |
#!/bin/bash
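# Render sagittal-examples.abp to MIDI (microabc + abc2midi) and to PDF
# (microabc + abcm2ps + ps2pdf).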
name=sagittal-examples
midi=${name}-midi
ps=${name}-ps
microabc -i- -S -M${name}.abp > ${midi}.abc
abc2midi ${midi}.abc
microabc -i- -S -P${name}.abp > ${ps}.abc
abcm2ps ${ps}.abc -O ${name}.ps
ps2pdf ${name}.ps
|
laughingman182/microabc
|
sagittal-examples.sh
|
Shell
|
gpl-2.0
| 225 |
#!/bin/sh
## $Id: viper-ubuntu.sh 24/07/2015 $
##
## script to install Viper on debian/ubuntu 14.04 minimal
##
## Authored by [email protected]
##
## https://github.com/daverstephens/The-SOC-Shop
##
## Start with a minimal install of 64bit Ubuntu Server 14.04
## which should include OpenSSH option selected during install
##
## Tested against Viper 1.3-dev
##
## Run the script as the standard user with the command below
##
## sudo sh viper-ubuntu.sh 2>&1 | tee viper-ubuntu.log
##
## When the script has completed, check viper-ubuntu.log for errors
## and then complete the final activities listed at the end of this script
## Install latest updates
apt-get -y update && apt-get -y upgrade
## Install dependencies
env DEBIAN_FRONTEND=noninteractive apt-get -y install unzip build-essential python-dev python-pip git automake libtool libimage-exiftool-perl swig libssl-dev libfuzzy-dev
## Set build paths
printf "Enter the full path to your temporary build directory\n"
read TempPath
[ ! -d $TempPath ] && mkdir $TempPath
printf "Enter the full path to your desired Viper directory\nFor example \\\home\\\user\n"
read ViperPath
[ ! -d $ViperPath ] && mkdir $ViperPath
cd $TempPath
## Install Yara
wget https://github.com/plusvic/yara/archive/master.zip
unzip master.zip
cd yara-*/
bash build.sh
make install
## Install PyDeep
cd yara-python/
python setup.py install
cd $TempPath
## Install SSDeep
wget http://sourceforge.net/projects/ssdeep/files/ssdeep-2.13/ssdeep-2.13.tar.gz/download
mv download ssdeep.tar.gz
tar zxf ssdeep.tar.gz
cd ssdeep-*/
./configure
make
sudo make install
cd $TempPath
## Install Python bindings
pip install pydeep
## Install Androguard (Optional - Needed for .apk files)
wget https://androguard.googlecode.com/files/androguard-1.9.tar.gz
tar zxf androguard-1.9.tar.gz
cd androguard-1.9/
python setup.py install
cd $TempPath
## Install EXIFTool
git clone https://github.com/smarnach/pyexiftool
cd pyexiftool
sudo python setup.py install
## Install Viper
cd $ViperPath
git clone https://github.com/botherder/viper
cd viper
pip install -r requirements.txt
## Test Install
yara --help|grep -q vmalvarez||echo "Yara :Install seems to have failed";
ssdeep -h|grep -q Kornblum||echo "SSDeep :Install seems to have failed";
## Run Viper with './viper.py'
|
Archangel-SOC/The-SOC-Shop
|
Viper/viper-install.sh
|
Shell
|
gpl-2.0
| 2,343 |
#!/bin/sh
#
# Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# @test
# @bug 4758537 4809833 8149599
# @summary Test that javac and java find files in similar ways
# @author Martin Buchholz
#
# @run shell/timeout=600 MineField.sh
# To run this test manually, simply do ./MineField.sh
#----------------------------------------------------------------
# The search order for classes used by both java and javac is:
#
# -Xbootclasspath/p:<path>
# -endorseddirs <dirs> or -Djava.endorsed.dirs=<dirs> (search for jar/zip only)
# -bootclasspath <path> or -Xbootclasspath:<path>
# -Xbootclasspath/a:<path>
# -extdirs <dirs> or -Djava.ext.dirs=<dirs> (search for jar/zip only)
# -classpath <path>, -cp <path>, env CLASSPATH=<path>
#
# Peculiarities of the class file search:
# - Empty elements of the (user) classpath default to ".",
# while empty elements of other paths are ignored.
# - Only for the user classpath is an empty string value equivalent to "."
# - Specifying a bootclasspath on the command line obliterates any
# previous -Xbootclasspath/p: or -Xbootclasspath/a: command line flags.
#
# JDK 9 update:
# java: The java launcher does not support any of the following:
# * -Xbootclasspath/p: -Xbootclasspath:
# * -endorseddirs -Djava.endorsed.dirs
# * -extdirs -Djava.ext.dirs
# All test cases exercising these features have been removed.
# javac: The following features are only supported when compiling
# for older releases:
# * -Xbootclasspath/p: -Xbootclasspath: -bootclasspath -Xbootclasspath/a:
# * -endorseddirs -Djava.endorsed.dirs
# * -extdirs -Djava.ext.dirs
# All test cases exercising these features have been modified to
# use -source 8 -target 8. In addition, javac test cases involving
# use of the runtime properties java.endorsed.dirs and java.extdirs
# (by means of -J-Dname=value) have been removed.
# Although the primary purpose of the test cases in this file is to
# compare javac and java behavior, some tests remain for javac for
# which there is no java equivalent. However, the cases remain as useful
# test cases for javac handling of the paths involved.
#----------------------------------------------------------------
. ${TESTSRC-.}/Util.sh
set -u
#----------------------------------------------------------------
# Prepare the "Minefield"
#----------------------------------------------------------------
Cleanup() {
Sys rm -rf GooSrc GooJar GooZip GooClass
Sys rm -rf BadSrc BadJar BadZip BadClass
Sys rm -rf OneDir *.class Main.java MANIFEST.MF
Sys rm -f java-lang.jar
}
Cleanup
Sys mkdir GooSrc GooJar GooZip GooClass
Sys mkdir BadSrc BadJar BadZip BadClass
echo 'public class Lib {public static void f(){}}' > Lib.java
Sys "$javac" ${TESTTOOLVMOPTS} Lib.java
Sys "$jar" cf GooJar/Lib.jar Lib.class
Sys "$jar" cf GooZip/Lib.zip Lib.class
Sys mv Lib.class GooClass/.
Sys mv Lib.java GooSrc/.
CheckFiles GooZip/Lib.zip GooJar/Lib.jar GooSrc/Lib.java
echo 'public class Lib {/* Bad */}' > Lib.java
Sys "$javac" ${TESTTOOLVMOPTS} Lib.java
Sys "$jar" cf BadJar/Lib.jar Lib.class
Sys "$jar" cf BadZip/Lib.zip Lib.class
Sys mv Lib.class BadClass/.
Sys mv Lib.java BadSrc/.
CheckFiles BadZip/Lib.zip BadJar/Lib.jar BadSrc/Lib.java
echo 'public class Main {public static void main(String[] a) {Lib.f();}}' > Main.java
# Create a jar file that is good enough to put on the javac boot class path (i.e. contains java.lang.**)
Sys "$jimage" extract --dir modules ${TESTJAVA}/lib/modules
Sys "$jar" cf java-lang.jar -C modules/java.base java/lang
Sys rm -rf modules
#----------------------------------------------------------------
# Verify that javac class search order is the same as java's
#----------------------------------------------------------------
Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/p:"GooClass" \
-bootclasspath "java-lang.jar${PS}BadZip/Lib.zip" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/p:"BadClass${PS}GooClass" \
-bootclasspath "java-lang.jar${PS}GooZip/Lib.zip${PS}BadClass" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/p:"BadJar/Lib.jar" \
-Xbootclasspath:"java-lang.jar${PS}GooClass" \
Main.java
#----------------------------------------------------------------
Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-bootclasspath "java-lang.jar${PS}GooZip/Lib.zip" \
-Xbootclasspath/p:"BadClass" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-bootclasspath "java-lang.jar${PS}BadZip/Lib.zip" \
-Xbootclasspath/p:"GooClass${PS}BadJar/Lib.jar" \
Main.java
#----------------------------------------------------------------
Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/p:"BadClass" \
-Xbootclasspath/a:"GooClass" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/p:"GooClass${PS}BadClass" \
-Xbootclasspath/a:"BadClass" \
Main.java
Success "$java" ${TESTVMOPTS} \
-Xbootclasspath/a:"GooClass" \
Main
#----------------------------------------------------------------
Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/p:"GooClass" \
-Xbootclasspath:"BadClass${PS}java-lang.jar" \
-Xbootclasspath/a:"GooClass" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/p:"BadClass" \
-Xbootclasspath:"GooClass${PS}BadClass${PS}java-lang.jar" \
-Xbootclasspath/a:"BadClass" \
Main.java
#----------------------------------------------------------------
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-endorseddirs "BadClass${PS}GooZip${PS}BadJar" \
-Xbootclasspath:"BadClass${PS}java-lang.jar" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Djava.endorsed.dirs="BadClass${PS}GooZip${PS}BadJar" \
-Xbootclasspath:"BadClass${PS}java-lang.jar" \
Main.java
#----------------------------------------------------------------
Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/a:"BadClass" \
-extdirs "GooZip" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Xbootclasspath/a:"GooClass${PS}BadClass" \
-extdirs "BadZip" \
Main.java
#----------------------------------------------------------------
Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-extdirs "GooClass${PS}BadZip" \
-cp "GooZip/Lib.zip" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-extdirs "BadClass${PS}GooZip${PS}BadJar" \
-cp "BadZip/Lib.zip" \
Main.java
Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} \
-Djava.ext.dirs="GooZip${PS}BadJar" \
-classpath "BadZip/Lib.zip" \
Main.java
#----------------------------------------------------------------
Failure "$javac" ${TESTTOOLVMOPTS} -classpath "BadClass${PS}GooClass" Main.java
Success "$javac" ${TESTTOOLVMOPTS} -classpath "GooClass${PS}BadClass" Main.java
Failure "$java" ${TESTVMOPTS} -classpath "BadClass${PS}GooClass${PS}." Main
Success "$java" ${TESTVMOPTS} -classpath "GooClass${PS}BadClass${PS}." Main
Failure "$javac" ${TESTTOOLVMOPTS} -cp "BadJar/Lib.jar${PS}GooZip/Lib.zip" Main.java
Success "$javac" ${TESTTOOLVMOPTS} -cp "GooJar/Lib.jar${PS}BadZip/Lib.zip" Main.java
Failure "$java" ${TESTVMOPTS} -cp "BadJar/Lib.jar${PS}${PS}GooZip/Lib.zip" Main
Success "$java" ${TESTVMOPTS} -cp "GooJar/Lib.jar${PS}${PS}BadZip/Lib.zip" Main
Failure env CLASSPATH="BadZip/Lib.zip${PS}GooJar/Lib.jar" "$javac" ${TESTTOOLVMOPTS} Main.java
Success env CLASSPATH="GooZip/Lib.zip${PS}BadJar/Lib.jar" "$javac" ${TESTTOOLVMOPTS} Main.java
Failure env CLASSPATH="${PS}BadZip/Lib.zip${PS}GooJar/Lib.jar" "$java" ${TESTVMOPTS} Main
Success env CLASSPATH="${PS}GooZip/Lib.zip${PS}BadJar/Lib.jar" "$java" ${TESTVMOPTS} Main
#----------------------------------------------------------------
# Check behavior of empty paths and empty path elements
#----------------------------------------------------------------
In() { cd "$1"; shift; "$@"; cd ..; }
In GooClass Failure "$javac" ${TESTTOOLVMOPTS} -cp ".." ../Main.java
In GooClass Failure "$java" ${TESTVMOPTS} -cp ".." Main
# Unspecified classpath defaults to "."
Sys mkdir OneDir; Sys cp Main.java GooClass/Lib.class OneDir/.
In OneDir Success "$javac" ${TESTTOOLVMOPTS} Main.java
In OneDir Success "$java" ${TESTVMOPTS} Main
# Empty classpath elements mean "."
In GooClass Success "$javac" ${TESTTOOLVMOPTS} -cp "${PS}.." ../Main.java
In GooClass Success "$java" ${TESTVMOPTS} -cp "${PS}.." Main
In GooClass Success "$javac" ${TESTTOOLVMOPTS} -cp "..${PS}" ../Main.java
In GooClass Success "$java" ${TESTVMOPTS} -cp "..${PS}" Main
In GooClass Success "$javac" ${TESTTOOLVMOPTS} -cp "..${PS}${PS}/xyzzy" ../Main.java
In GooClass Success "$java" ${TESTVMOPTS} -cp "..${PS}${PS}/xyzzy" Main
# All other empty path elements are ignored.
In GooJar Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -extdirs "" -cp ".." ../Main.java
In GooJar Failure "$javac" -source 8 -targt 8 ${TESTTOOLVMOPTS} -extdirs "${PS}" -cp ".." ../Main.java
In GooJar Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Djava.ext.dirs="${PS}" -cp ".." ../Main.java
In GooJar Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -extdirs "." -cp ".." ../Main.java
In GooJar Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Djava.ext.dirs="." -cp ".." ../Main.java
In GooJar Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Djava.endorsed.dirs="" -cp ".." ../Main.java
In GooJar Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -endorseddirs "${PS}" -cp ".." ../Main.java
In GooJar Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Djava.endorsed.dirs="." -cp ".." ../Main.java
In GooClass Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Xbootclasspath/p: -cp ".." ../Main.java
In GooClass Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Xbootclasspath/p:. -cp ".." ../Main.java
In GooClass Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Xbootclasspath:"../java-lang.jar" -cp ".." ../Main.java
In GooClass Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Xbootclasspath:"../java-lang.jar${PS}." -cp ".." ../Main.java
In GooClass Failure "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Xbootclasspath/a: -cp ".." ../Main.java
In GooClass Failure "$java" ${TESTVMOPTS} -Xbootclasspath/a: -cp ".." Main
In GooClass Success "$javac" -source 8 -target 8 ${TESTTOOLVMOPTS} -Xbootclasspath/a:. -cp ".." ../Main.java
In GooClass Success "$java" ${TESTVMOPTS} -Xbootclasspath/a:. -cp ".." Main
Cleanup
Bottom Line
|
FauxFaux/jdk9-langtools
|
test/tools/javac/Paths/MineField.sh
|
Shell
|
gpl-2.0
| 11,801 |
#!/bin/sh
export PLATFORM="AOSP"
export MREV="KK4.4"
export CURDATE=`date "+%m.%d.%Y"`
export MUXEDNAMELONG="KT-SGS4-$MREV-$PLATFORM-$CARRIER-$CURDATE"
export MUXEDNAMESHRT="KT-SGS4-$MREV-$PLATFORM-$CARRIER*"
export KTVER="--$MUXEDNAMELONG--"
export KERNELDIR=`readlink -f .`
export PARENT_DIR=`readlink -f ..`
export INITRAMFS_DEST=$KERNELDIR/kernel/usr/initramfs
export INITRAMFS_SOURCE=`readlink -f ..`/Ramdisks/$PLATFORM"_"$CARRIER"4.4"
export CONFIG_${PLATFORM}_BUILD=y
export PACKAGEDIR=$PARENT_DIR/Packages/$PLATFORM
#Enable FIPS mode
export USE_SEC_FIPS_MODE=true
export ARCH=arm
# export CROSS_COMPILE=/home/ktoonsez/aokp4.2/prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
export CROSS_COMPILE=$PARENT_DIR/linaro4.9-a15/bin/arm-cortex_a15-linux-gnueabihf-
time_start=$(date +%s.%N)
echo "Remove old Package Files"
rm -rf $PACKAGEDIR/*
echo "Setup Package Directory"
mkdir -p $PACKAGEDIR/system/app
mkdir -p $PACKAGEDIR/system/lib/modules
mkdir -p $PACKAGEDIR/system/etc/init.d
echo "Create initramfs dir"
mkdir -p $INITRAMFS_DEST
echo "Remove old initramfs dir"
rm -rf $INITRAMFS_DEST/*
echo "Copy new initramfs dir"
cp -R $INITRAMFS_SOURCE/* $INITRAMFS_DEST
echo "chmod initramfs dir"
chmod -R g-w $INITRAMFS_DEST/*
rm $(find $INITRAMFS_DEST -name EMPTY_DIRECTORY -print)
rm -rf $(find $INITRAMFS_DEST -name .git -print)
echo "Remove old zImage"
rm $PACKAGEDIR/zImage
rm arch/arm/boot/zImage
echo "Make the kernel"
make VARIANT_DEFCONFIG=jf_$CARRIER"_defconfig" SELINUX_DEFCONFIG=jfselinux_defconfig SELINUX_LOG_DEFCONFIG=jfselinux_log_defconfig KT_jf_defconfig
echo "Modding .config file - "$KTVER
sed -i 's,CONFIG_LOCALVERSION="-KT-SGS4",CONFIG_LOCALVERSION="'$KTVER'",' .config
HOST_CHECK=`uname -n`
if [ $HOST_CHECK = 'ktoonsez-VirtualBox' ] || [ $HOST_CHECK = 'task650-Underwear' ]; then
echo "Ktoonsez/task650 24!"
make -j24
else
echo "Others! - " + $HOST_CHECK
make -j`grep 'processor' /proc/cpuinfo | wc -l`
fi;
echo "Copy modules to Package"
cp -a $(find . -name *.ko -print |grep -v initramfs) $PACKAGEDIR/system/lib/modules/
if [ $ADD_KTWEAKER = 'Y' ]; then
cp /home/ktoonsez/workspace/com.ktoonsez.KTweaker.apk $PACKAGEDIR/system/app/com.ktoonsez.KTweaker.apk
cp /home/ktoonsez/workspace/com.ktoonsez.KTmonitor.apk $PACKAGEDIR/system/app/com.ktoonsez.KTmonitor.apk
fi;
if [ -e $KERNELDIR/arch/arm/boot/zImage ]; then
echo "Copy zImage to Package"
cp arch/arm/boot/zImage $PACKAGEDIR/zImage
echo "Make boot.img"
./mkbootfs $INITRAMFS_DEST | gzip > $PACKAGEDIR/ramdisk.gz
./mkbootimg --cmdline 'androidboot.hardware=qcom user_debug=31 zcache msm_rtb.filter=0x3F ehci-hcd.park=3' --kernel $PACKAGEDIR/zImage --ramdisk $PACKAGEDIR/ramdisk.gz --base 0x80200000 --pagesize 2048 --ramdisk_offset 0x02000000 --output $PACKAGEDIR/boot.img
#if [ $EXEC_LOKI = 'Y' ]; then
# echo "Executing loki"
# ./loki_patch-linux-x86_64 boot aboot$CARRIER.img $PACKAGEDIR/boot.img $PACKAGEDIR/boot.lok
# rm $PACKAGEDIR/boot.img
#fi;
cd $PACKAGEDIR
#if [ $EXEC_LOKI = 'Y' ]; then
# cp -R ../META-INF-SEC ./META-INF
#else
cp -R ../META-INF .
#fi;
cp -R ../kernel .
rm ramdisk.gz
rm zImage
rm ../$MUXEDNAMESHRT.zip
zip -r ../$MUXEDNAMELONG.zip .
time_end=$(date +%s.%N)
echo -e "${BLDYLW}Total time elapsed: ${TCTCLR}${TXTGRN}$(echo "($time_end - $time_start) / 60"|bc ) ${TXTYLW}minutes${TXTGRN} ($(echo "$time_end - $time_start"|bc ) ${TXTYLW}seconds) ${TXTCLR}"
export DLNAME="http://ktoonsez.jonathanjsimon.com/sgs4/$PLATFORM/$MUXEDNAMELONG.zip"
FILENAME=../$MUXEDNAMELONG.zip
FILESIZE=$(stat -c%s "$FILENAME")
echo "Size of $FILENAME = $FILESIZE bytes."
rm ../$MREV-$PLATFORM-$CARRIER"-version.txt"
exec 1>>../$MREV-$PLATFORM-$CARRIER"-version.txt" 2>&1
echo -n "$MUXEDNAMELONG,$FILESIZE," & curl -s https://www.googleapis.com/urlshortener/v1/url --header 'Content-Type: application/json' --data "{'longUrl': '$DLNAME'}" | grep \"id\" | sed -e 's,^.*id": ",,' -e 's/",.*$//'
#echo 1>&-
SHORTURL=$(grep "http" ../$MREV-$PLATFORM-$CARRIER"-version.txt" | sed s/$MUXEDNAMELONG,$FILESIZE,//g)
exec 1>>../url/aurlstats-$CURDATE.sh 2>&1
##echo "curl -s 'https://www.googleapis.com/urlshortener/v1/url?shortUrl="$SHORTURL"&projection=FULL' | grep -m2 \"shortUrlClicks\|\\\"longUrl\\\"\""
echo "echo "$MREV-$PLATFORM-$CARRIER
echo "curl -s 'https://www.googleapis.com/urlshortener/v1/url?shortUrl="$SHORTURL"&projection=FULL' | grep -m1 \"shortUrlClicks\""
#echo 1>&-
chmod 0777 ../url/aurlstats-$CURDATE.sh
sed -i 's,http://ktoonsez.jonathanjsimon.com/sgs4/'$PLATFORM'/'$MUXEDNAMESHRT','"[B]"$CURDATE":[/B] [url]"$SHORTURL'[/url],' ../url/SERVERLINKS.txt
cd $KERNELDIR
else
echo "KERNEL DID NOT BUILD! no zImage exist"
fi;
|
ktoonsez/KT-SGS4
|
build_master.sh
|
Shell
|
gpl-2.0
| 4,691 |
#!/bin/bash
# Script to deploy frontaccounting at Terminal.com
# Cloudlabs, INC. Copyright (C) 2015
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Cloudlabs, INC. - 653 Harrison St, San Francisco, CA 94107.
# http://www.terminal.com - [email protected]
INSTALL_PATH="/var/www"
# Includes
wget https://raw.githubusercontent.com/terminalcloud/apps/master/terlib.sh
source terlib.sh || (echo "cannot get the includes"; exit -1)
install(){
# Basics
pkg_update
system_cleanup
basics_install
# Procedure:
php5_install
mysql_install
mysql_setup frontaccounting frontaccounting terminal
cd $INSTALL_PATH
wget https://raw.githubusercontent.com/terminalcloud/apps/master/others/frontaccounting.tar.gz
tar -xzf frontaccounting.tar.gz && rm frontaccounting.tar.gz
chown -R www-data:www-data frontaccounting
apache_install
apache_default_vhost frontaccounting.conf $INSTALL_PATH/frontaccounting
echo "date.timezone = America/Los_Angeles" >> /etc/php5/apache2/php.ini
sed -i 's/upload_max_filesize\ \=\ 2M/upload_max_filesize\ \=\ 24M/g' /etc/php5/apache2/php.ini
sed -i 's/post_max_size\ \=\ 8M/post_max_size\ \=\ 32M/g' /etc/php5/apache2/php.ini
service apache2 restart
}
show(){
# Get the startup script
wget -q -N https://raw.githubusercontent.com/terminalcloud/apps/master/others/frontaccounting_hooks.sh
mkdir -p /CL/hooks/
mv frontaccounting_hooks.sh /CL/hooks/startup.sh
# Execute startup script by first to get the common files
chmod 777 /CL/hooks/startup.sh && /CL/hooks/startup.sh
}
if [[ -z $1 ]]; then
install && show
elif [[ $1 == "show" ]]; then
show
else
echo "unknown parameter specified"
fi
|
terminalcloud/apps
|
frontaccounting_installer.sh
|
Shell
|
gpl-2.0
| 2,319 |
# $1: package name
common_coinqt ()
{
cd $MYMKR_PREFIX/src/$1 || die "cd source dir failed"
qmake \
"USE_UPNP=-" \
INCLUDEPATH="$MYMKR_PREFIX/include" \
LIBS="-L$MYMKR_PREFIX/lib" \
DESTDIR="$MYMKR_PREFIX/bin" \
|| die 'qmake failed'
make $JOBS || die 'make failed'
}
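# Usage sketch (package name is hypothetical):
#   common_coinqt mycoin
# expects a qmake project under $MYMKR_PREFIX/src/mycoin and, via DESTDIR,
# drops the resulting binary into $MYMKR_PREFIX/bin.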
|
sensorii/mymkr
|
mymkr/common/coinqt.sh
|
Shell
|
gpl-2.0
| 281 |
#!/bin/sh
test_description='test separate work tree'
. ./test-lib.sh
test_expect_success 'setup' '
EMPTY_TREE=$(git write-tree) &&
EMPTY_BLOB=$(git hash-object -t blob --stdin </dev/null) &&
CHANGED_BLOB=$(echo changed | git hash-object -t blob --stdin) &&
ZEROES=0000000000000000000000000000000000000000 &&
EMPTY_BLOB7=$(echo $EMPTY_BLOB | sed "s/\(.......\).*/\1/") &&
CHANGED_BLOB7=$(echo $CHANGED_BLOB | sed "s/\(.......\).*/\1/") &&
mkdir -p work/sub/dir &&
mkdir -p work2 &&
mv .git repo.git
'
test_expect_success 'setup: helper for testing rev-parse' '
test_rev_parse() {
echo $1 >expected.bare &&
echo $2 >expected.inside-git &&
echo $3 >expected.inside-worktree &&
if test $# -ge 4
then
echo $4 >expected.prefix
fi &&
git rev-parse --is-bare-repository >actual.bare &&
git rev-parse --is-inside-git-dir >actual.inside-git &&
git rev-parse --is-inside-work-tree >actual.inside-worktree &&
if test $# -ge 4
then
git rev-parse --show-prefix >actual.prefix
fi &&
test_cmp expected.bare actual.bare &&
test_cmp expected.inside-git actual.inside-git &&
test_cmp expected.inside-worktree actual.inside-worktree &&
if test $# -ge 4
then
# rev-parse --show-prefix should output
# a single newline when at the top of the work tree,
# but we test for that separately.
test -z "$4" && ! test -s actual.prefix ||
test_cmp expected.prefix actual.prefix
fi
}
'
test_expect_success 'setup: core.worktree = relative path' '
unset GIT_WORK_TREE;
GIT_DIR=repo.git &&
GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
export GIT_DIR GIT_CONFIG &&
git config core.worktree ../work
'
test_expect_success 'outside' '
test_rev_parse false false false
'
test_expect_success 'inside work tree' '
(
cd work &&
GIT_DIR=../repo.git &&
GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
test_rev_parse false false true ""
)
'
test_expect_failure 'empty prefix is actually written out' '
echo >expected &&
(
cd work &&
GIT_DIR=../repo.git &&
GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
git rev-parse --show-prefix >../actual
) &&
test_cmp expected actual
'
test_expect_success 'subdir of work tree' '
(
cd work/sub/dir &&
GIT_DIR=../../../repo.git &&
GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
test_rev_parse false false true sub/dir/
)
'
test_expect_success 'setup: core.worktree = absolute path' '
unset GIT_WORK_TREE;
GIT_DIR=$(pwd)/repo.git &&
GIT_CONFIG=$GIT_DIR/config &&
export GIT_DIR GIT_CONFIG &&
git config core.worktree "$(pwd)/work"
'
test_expect_success 'outside' '
test_rev_parse false false false &&
(
cd work2 &&
test_rev_parse false false false
)
'
test_expect_success 'inside work tree' '
(
cd work &&
test_rev_parse false false true ""
)
'
test_expect_success 'subdir of work tree' '
(
cd work/sub/dir &&
test_rev_parse false false true sub/dir/
)
'
test_expect_success 'setup: GIT_WORK_TREE=relative (override core.worktree)' '
GIT_DIR=$(pwd)/repo.git &&
GIT_CONFIG=$GIT_DIR/config &&
git config core.worktree non-existent &&
GIT_WORK_TREE=work &&
export GIT_DIR GIT_CONFIG GIT_WORK_TREE
'
test_expect_success 'outside' '
test_rev_parse false false false &&
(
cd work2 &&
test_rev_parse false false false
)
'
test_expect_success 'inside work tree' '
(
cd work &&
GIT_WORK_TREE=. &&
test_rev_parse false false true ""
)
'
test_expect_success 'subdir of work tree' '
(
cd work/sub/dir &&
GIT_WORK_TREE=../.. &&
test_rev_parse false false true sub/dir/
)
'
test_expect_success 'setup: GIT_WORK_TREE=absolute, below git dir' '
mv work repo.git/work &&
mv work2 repo.git/work2 &&
GIT_DIR=$(pwd)/repo.git &&
GIT_CONFIG=$GIT_DIR/config &&
GIT_WORK_TREE=$(pwd)/repo.git/work &&
export GIT_DIR GIT_CONFIG GIT_WORK_TREE
'
test_expect_success 'outside' '
echo outside &&
test_rev_parse false false false
'
test_expect_success 'in repo.git' '
(
cd repo.git &&
test_rev_parse false true false
) &&
(
cd repo.git/objects &&
test_rev_parse false true false
) &&
(
cd repo.git/work2 &&
test_rev_parse false true false
)
'
test_expect_success 'inside work tree' '
(
cd repo.git/work &&
test_rev_parse false true true ""
)
'
test_expect_success 'subdir of work tree' '
(
cd repo.git/work/sub/dir &&
test_rev_parse false true true sub/dir/
)
'
test_expect_success 'find work tree from repo' '
echo sub/dir/untracked >expected &&
cat <<-\EOF >repo.git/work/.gitignore &&
expected.*
actual.*
.gitignore
EOF
>repo.git/work/sub/dir/untracked &&
(
cd repo.git &&
git ls-files --others --exclude-standard >../actual
) &&
test_cmp expected actual
'
test_expect_success 'find work tree from work tree' '
echo sub/dir/tracked >expected &&
>repo.git/work/sub/dir/tracked &&
(
cd repo.git/work/sub/dir &&
git --git-dir=../../.. add tracked
) &&
(
cd repo.git &&
git ls-files >../actual
) &&
test_cmp expected actual
'
test_expect_success '_gently() groks relative GIT_DIR & GIT_WORK_TREE' '
(
cd repo.git/work/sub/dir &&
GIT_DIR=../../.. &&
GIT_WORK_TREE=../.. &&
GIT_PAGER= &&
export GIT_DIR GIT_WORK_TREE GIT_PAGER &&
git diff --exit-code tracked &&
echo changed >tracked &&
test_must_fail git diff --exit-code tracked
)
'
test_expect_success 'diff-index respects work tree under .git dir' '
cat >diff-index-cached.expected <<-EOF &&
:000000 100644 $ZEROES $EMPTY_BLOB A sub/dir/tracked
EOF
cat >diff-index.expected <<-EOF &&
:000000 100644 $ZEROES $ZEROES A sub/dir/tracked
EOF
(
GIT_DIR=repo.git &&
GIT_WORK_TREE=repo.git/work &&
export GIT_DIR GIT_WORK_TREE &&
git diff-index $EMPTY_TREE >diff-index.actual &&
git diff-index --cached $EMPTY_TREE >diff-index-cached.actual
) &&
test_cmp diff-index.expected diff-index.actual &&
test_cmp diff-index-cached.expected diff-index-cached.actual
'
test_expect_success 'diff-files respects work tree under .git dir' '
cat >diff-files.expected <<-EOF &&
:100644 100644 $EMPTY_BLOB $ZEROES M sub/dir/tracked
EOF
(
GIT_DIR=repo.git &&
GIT_WORK_TREE=repo.git/work &&
export GIT_DIR GIT_WORK_TREE &&
git diff-files >diff-files.actual
) &&
test_cmp diff-files.expected diff-files.actual
'
test_expect_success 'git diff respects work tree under .git dir' '
cat >diff-TREE.expected <<-EOF &&
diff --git a/sub/dir/tracked b/sub/dir/tracked
new file mode 100644
index 0000000..$CHANGED_BLOB7
--- /dev/null
+++ b/sub/dir/tracked
@@ -0,0 +1 @@
+changed
EOF
cat >diff-TREE-cached.expected <<-EOF &&
diff --git a/sub/dir/tracked b/sub/dir/tracked
new file mode 100644
index 0000000..$EMPTY_BLOB7
EOF
cat >diff-FILES.expected <<-EOF &&
diff --git a/sub/dir/tracked b/sub/dir/tracked
index $EMPTY_BLOB7..$CHANGED_BLOB7 100644
--- a/sub/dir/tracked
+++ b/sub/dir/tracked
@@ -0,0 +1 @@
+changed
EOF
(
GIT_DIR=repo.git &&
GIT_WORK_TREE=repo.git/work &&
export GIT_DIR GIT_WORK_TREE &&
git diff $EMPTY_TREE >diff-TREE.actual &&
git diff --cached $EMPTY_TREE >diff-TREE-cached.actual &&
git diff >diff-FILES.actual
) &&
test_cmp diff-TREE.expected diff-TREE.actual &&
test_cmp diff-TREE-cached.expected diff-TREE-cached.actual &&
test_cmp diff-FILES.expected diff-FILES.actual
'
test_expect_success 'git grep' '
echo dir/tracked >expected.grep &&
(
cd repo.git/work/sub &&
GIT_DIR=../.. &&
GIT_WORK_TREE=.. &&
export GIT_DIR GIT_WORK_TREE &&
git grep -l changed >../../../actual.grep
) &&
test_cmp expected.grep actual.grep
'
test_expect_success 'git commit' '
(
cd repo.git &&
GIT_DIR=. GIT_WORK_TREE=work git commit -a -m done
)
'
test_expect_success 'absolute pathspec should fail gracefully' '
(
cd repo.git &&
test_might_fail git config --unset core.worktree &&
test_must_fail git log HEAD -- /home
)
'
test_expect_success 'make_relative_path handles double slashes in GIT_DIR' '
>dummy_file
echo git --git-dir="$(pwd)//repo.git" --work-tree="$(pwd)" add dummy_file &&
git --git-dir="$(pwd)//repo.git" --work-tree="$(pwd)" add dummy_file
'
test_done
|
vidarh/Git
|
t/t1501-worktree.sh
|
Shell
|
gpl-2.0
| 8,003 |
#!/bin/sh
#
# Attempt to figure out the minimum set of includes for our header files.
#
# ### this is incomplete. it merely lists the header files in order from
# ### "no dependencies on other svn headers" to the larger header files
# ### which have dependencies. manually working through the headers in
# ### this order will minimize includes.
#
# Each header file is test-compiled to ensure that it has enough headers.
# Of course, this could be false-positive because another header that
# has been included has further included something to enable compilation
# of the header in question. More sophisticated testing (e.g. filtering
# includes out of the included header) would be necessary for detection.
#
files="*.h private/*.h"
deps="deps.$$"
INCLUDES="-I. -I.. -I/usr/include/apr-1 -I/usr/include/apache2"
rm -f "$deps"
for f in $files ; do
sed -n "s%#include \"\(svn_[a-z0-9_]*\.h\)\".*%$f \1%p" $f | fgrep -v svn_private_config.h >> "$deps"
done
function process_file ()
{
echo "Processing $header"
echo "#include \"$header\"" > "$deps".c
gcc -o /dev/null -S $INCLUDES "$deps".c
### monkey the includes and recompile to find the minimal set
}
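# Hypothetical sketch (not part of the original tool) of the include
# filtering hinted at above: comment out one svn_*.h include at a time,
# test-compile the result, and report includes that look unnecessary.
# It reuses the $header/$deps conventions of this script and is defined
# but never called.
function minimize_includes ()
{
  for inc in `sed -n "s%#include \"\(svn_[a-z0-9_]*\.h\)\".*%\1%p" $header` ; do
    sed "s%#include \"$inc\"%/* $inc elided */%" $header > "$deps.h"
    echo "#include \"$deps.h\"" > "$deps.c"
    gcc -o /dev/null -S $INCLUDES "$deps.c" 2>/dev/null \
      && echo "$header: $inc may be unnecessary"
  done
  rm -f "$deps.h" "$deps.c"
}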
while test -s "$deps" ; do
#wc -l $deps
for header in $files ; do
if grep -q "^$header" "$deps" ; then
continue
fi
process_file
fgrep -v "$header" "$deps" > "$deps".new
mv "$deps".new "$deps"
files="`echo $files | sed s%$header%%`"
break
done
done
for header in $files ; do
process_file
done
|
bdmod/extreme-subversion
|
BinarySourcce/subversion-1.6.17/tools/dev/min-includes.sh
|
Shell
|
gpl-2.0
| 1,510 |
#!/bin/bash
rm .version
# Bash Color
green='\033[01;32m'
red='\033[01;31m'
blink_red='\033[05;31m'
restore='\033[0m'
clear
# Resources
THREAD="-j$(grep -c ^processor /proc/cpuinfo)"
KERNEL="zImage"
DTBIMAGE="dtb"
DEFCONFIG="shamu_defconfig"
# Kernel Details
BASE_AK_VER="LiquidKernel"
VER=".v2.5_"
CURDATE=$(date "+%m-%d-%Y")
BRANCH="bfs-"
AK_VER="$BASE_AK_VER$VER$CURDATE"
AK_VER_BFS="$BASE_AK_VER$VER$BRANCH$CURDATE"
# Vars
export CROSS_COMPILE=/home/teamliquid/Brock/liquid/prebuilts/gcc/linux-x86/arm/arm-eabi-6.0/bin/arm-eabi-
export ARCH=arm
export SUBARCH=arm
# Paths
KERNEL_DIR="/home/teamliquid/Brock/liquid/kernel/moto/shamu"
REPACK_DIR="/home/teamliquid/Brock/liquid/kernel/moto/shamu/utils/AnyKernel2"
MODULES_DIR="/home/teamliquid/Brock/liquid/kernel/moto/shamu/utils/AnyKernel2/modules"
ZIP_MOVE="/www/devs/teamliquid/Kernels/shamu/"
ZIMAGE_DIR="/home/teamliquid/Brock/liquid/kernel/moto/shamu/arch/arm/boot"
ZIP_DIR="/home/teamliquid/Brock/liquid/kernel/moto/shamu/utils/zip"
UTILS="/home/teamliquid/Brock/liquid/kernel/moto/shamu/utils"
# Functions
function clean_all {
rm -rf $MODULES_DIR/*
cd $ZIP_DIR/kernel
rm -rf $DTBIMAGE
cd $KERNEL_DIR
echo
make clean && make mrproper
}
function checkout_bfs {
cd $KERNEL_DIR
echo
git fetch lsd && git branch temp && git checkout lsd/bfs && git merge temp && git branch -D temp
}
function checkout_5.1 {
cd $KERNEL_DIR
echo
git fetch lsd && git branch temp && git checkout lsd/5.1 && git merge temp && git branch -D temp
}
function make_kernel {
echo
make $DEFCONFIG
script -q /home/teamliquid/Brock/Compile-$CURDATE.log -c "
make $THREAD "
}
function make_modules {
rm -f $MODULES_DIR/*
find $KERNEL_DIR -name '*.ko' -exec cp -v {} $MODULES_DIR \;
}
function make_dtb {
$REPACK_DIR/tools/dtbToolCM -2 -o $REPACK_DIR/$DTBIMAGE -s 2048 -p scripts/dtc/ arch/arm/boot/
}
function make_boot {
cp -vr $ZIMAGE_DIR/zImage-dtb $ZIP_DIR/kernel/zImage
}
function make_zip {
cd $ZIP_DIR
zip -r9 kernel.zip *
mv kernel.zip $ZIP_MOVE
rm $ZIP_DIR/kernel/zImage
}
function sign_zip {
cd $ZIP_MOVE
java -jar $UTILS/signapk.jar $UTILS/testkey.x509.pem $UTILS/testkey.pk8 kernel.zip `echo $AK_VER`.zip
rm kernel.zip
cd $KERNEL_DIR
}
function sign_zip_bfs {
cd $ZIP_MOVE
java -jar $UTILS/signapk.jar $UTILS/testkey.x509.pem $UTILS/testkey.pk8 kernel.zip `echo $AK_VER_BFS`.zip
rm kernel.zip
cd $KERNEL_DIR
}
DATE_START=$(date +"%s")
echo "---------------"
echo "Kernel Version:"
echo "---------------"
echo -e "${red}"; echo -e "${blink_red}"; echo "$AK_VER"; echo -e "${restore}";
echo -e "${green}"
echo "-----------------"
echo "Making LiquidKernel:"
echo "-----------------"
echo -e "${restore}"
while read -p "Which branch do you want to build (5.1/bfs)? " dchoice
do
case "$dchoice" in
bfs|BFS|Bfs)
checkout_bfs
make_kernel
make_dtb
make_modules
make_boot
make_zip
sign_zip_bfs
clean_all
break
;;
5.1)
checkout_5.1
make_kernel
make_dtb
make_modules
make_boot
make_zip
sign_zip
clean_all
break
;;
* )
echo
echo "Invalid try again!"
echo
;;
esac
done
echo -e "${green}"
echo "-------------------"
echo "Build Completed in:"
echo "-------------------"
echo -e "${restore}"
DATE_END=$(date +"%s")
DIFF=$(($DATE_END - $DATE_START))
echo "Time: $(($DIFF / 60)) minute(s) and $(($DIFF % 60)) seconds."
echo
|
CPA-Poke/LiquidKernel-Shamu
|
shamu.sh
|
Shell
|
gpl-2.0
| 3,390 |
#!/bin/sh
# tsql.sh
#
# $Id$
#
# VARIOUS MACHINE BYTEORDER support
#
# This file is part of the OpenLink Software Virtuoso Open-Source (VOS)
# project.
#
# Copyright (C) 1998-2012 OpenLink Software
#
# This project is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; only version 2 of the License, dated June 1991.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
LOGFILE=byteorder.output
export LOGFILE
. ./test_fn.sh
DSN=$PORT
www_server=bugzilla.openlinksw.com
www_port=7780
www_location=resources
demo_sol_db=demo-sol.db
BANNER "STARTED SERIES OF BYTEORDER TEST (byteorder.sh)"
SHUTDOWN_SERVER
rm -f $DBLOGFILE
rm -f $DBFILE
MAKECFG_FILE $TESTCFGFILE $PORT $CFGFILE
if [ -f "$demo_sol_db" ]
then
ECHO "File '$demo_sol_db found, no need to download it."
else
if [ "${OSTYPE}" = "solaris2.7" ]
then
ping "${www_server}" 128 3
else
ping -c 3 "${www_server}"
fi
pingres=$?
if [ $? = "0" ]
then
wget -t 10 -N "${www_server}:${www_port}/${www_location}/${demo_sol_db}.bz2" ./
if [ -f "${demo_sol_db}.bz2" ]
then
bzip2 -cd "${demo_sol_db}.bz2" > "${demo_sol_db}"
fi
else
ECHO "Unable to ping '${www_server}'"
fi
fi
if [ ! -f "$demo_sol_db" ]
then
LOG "***FAILED: could not get $demo_sol_db database"
exit 1
else
LOG "PASSED: $demo_sol_db"
fi
cp $demo_sol_db $DBFILE
if [ -f "${demo_sol_db}.bz2" ]
then
rm -f "${demo_sol_db}"
fi
RUN $SERVER $FOREGROUND_OPTION
SHUTDOWN_SERVER
START_SERVER $PORT 1000
RUN $ISQL $DSN PROMPT=OFF VERBOSE=OFF ERRORS=STDOUT < byteorder.sql
rm -f fc.xml
SHUTDOWN_SERVER
CHECK_LOG
BANNER "COMPLETED SERIES OF BYTEORDER TESTS"
|
trueg/virtuoso-opensource
|
binsrc/tests/suite/byteorder.sh
|
Shell
|
gpl-2.0
| 2,167 |
#!/bin/bash
########################
# Back up data: mysql/web
# ./backup.sh live
########################
source settings.sh
DATE=`date +%Y%m%d-%H%M`
MysqlName='mysql.tar.gz'
WebName='web.tar.gz'
BackupPath=$BACKUP/$DATE
mkdir $BackupPath -p
echo "Backup Mysql"
drush sql-dump | gzip > $BackupPath/$MysqlName
echo "Backup Web"
tar -zcvf $BackupPath/$WebName $WEB
echo "Backup Success"
echo "Backup Path is: $ROOT/$BackupPath/"
|
ForeverGlory/GloryFrame
|
scripts/backup.sh
|
Shell
|
gpl-2.0
| 435 |
convert images/OCS-647-A.png -crop 1550x311+0+0 +repage images/OCS-647-A-0.png
convert -append images/OCS-646-B-8.png images/OCS-647-A-0.png images/OCS-646-B-8.png
rm images/OCS-647-A-0.png
convert images/OCS-647-A.png -crop 1550x551+0+318 +repage images/OCS-647-A-1.png
convert images/OCS-647-A.png -crop 1550x467+0+870 +repage images/OCS-647-A-2.png
convert images/OCS-647-A.png -crop 1550x390+0+1350 +repage images/OCS-647-A-3.png
convert images/OCS-647-A.png -crop 1550x563+0+1735 +repage images/OCS-647-A-4.png
convert images/OCS-647-A.png -crop 1550x400+0+2289 +repage images/OCS-647-A-5.png
convert images/OCS-647-A.png -crop 1550x315+0+2692 +repage images/OCS-647-A-6.png
convert images/OCS-647-A.png -crop 1550x951+0+3012 +repage images/OCS-647-A-7.png
convert images/OCS-647-A.png -crop 1550x557+0+3968 +repage images/OCS-647-A-8.png
#
#/OCS-647.png
convert images/OCS-647-B.png -crop 1561x871+0+0 +repage images/OCS-647-B-0.png
convert -append images/OCS-647-A-8.png images/OCS-647-B-0.png images/OCS-647-A-8.png
rm images/OCS-647-B-0.png
convert images/OCS-647-B.png -crop 1561x2379+0+872 +repage images/OCS-647-B-1.png
convert images/OCS-647-B.png -crop 1561x385+0+3260 +repage images/OCS-647-B-2.png
convert images/OCS-647-B.png -crop 1561x305+0+3658 +repage images/OCS-647-B-3.png
convert images/OCS-647-B.png -crop 1561x539+0+3972 +repage images/OCS-647-B-4.png
#
#/OCS-647.png
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/findindents.OCS-647.sh
|
Shell
|
gpl-2.0
| 1,394 |
#!/bin/bash
#
# This is a quick script to speed up development that kills an installed JIRA
# version.
#
./jira/bin/shutdown.sh
rm -rf atlassian-jira*;rm jira;rm -rf mysql-connector-java-5.1.5/;rm mysqlj.tgz;rm jira.sql
|
adamfisk/littleshoot-client
|
bin/bootstrap/atlassian/jira/reset.bash
|
Shell
|
gpl-2.0
| 221 |
#!/bin/sh
[ -n "$INCLUDE_ONLY" ] || {
NOT_INCLUDED=1
INCLUDE_ONLY=1
. ../netifd-proto.sh
. ./ppp.sh
init_proto "$@"
}
proto_3g_init_config() {
no_device=1
available=1
ppp_generic_init_config
proto_config_add_string "device:device"
proto_config_add_string "apn"
proto_config_add_string "service"
proto_config_add_string "pincode"
proto_config_add_string "dialnumber"
}
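# For reference, a minimal /etc/config/network stanza exercising these options
# could look like the following (values are illustrative, not taken from this
# tree):
#
# config interface 'wan'
#         option proto '3g'
#         option device '/dev/ttyUSB0'
#         option apn 'internet'
#         option service 'umts'
#         option pincode '1234'
#         option dialnumber '*99#'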
init_4g(){
device_at=`uci get 4g.modem.device`
#4g info dump:
a=$(gcom -d ${device_at} -s /etc/gcom/getstrength.gcom |grep "," |cut -d: -f2|cut -d, -f1)
rssi_percent=$(printf "%d%%\n" $((a*100/31)))
sim_status=$(gcom -d ${device_at} -s /etc/gcom/getsimstatus.gcom )
model=$(gcom -d ${device_at} -s /etc/gcom/getcardinfo.gcom |head -n3|tail -n1|tr -d '\r')
rev=$(gcom -d ${device_at} -s /etc/gcom/getcardinfo.gcom |grep -i rev |cut -d: -f2-|tr -d '\r')
imei=$(gcom -d ${device_at} -s /etc/gcom/getimei.gcom)
imsi=$(gcom -d ${device_at} -s /etc/gcom/getimsi.gcom)
iccid=$(gcom -d ${device_at} -s /etc/gcom/iccid_forge.gcom|cut -d: -f2)
roam=$(gcom -d ${device_at} -s /etc/gcom/checkregister.gcom )
lac=$(gcom -d ${device_at} -s /etc/gcom/getlaccellid.gcom )
reg_net=$(gcom -d ${device_at} -s /etc/gcom/getregisterednetwork.gcom |cut -d: -f2- )
uci set 4g.modem.device="${device_at}"
uci set 4g.modem.rssi="$rssi_percent"
uci set 4g.modem.sim_status="$sim_status"
uci set 4g.modem.model="$model"
uci set 4g.modem.rev="$rev"
uci set 4g.modem.imei="$imei"
uci set 4g.modem.imsi="$imsi"
uci set 4g.modem.iccid="$iccid"
uci set 4g.modem.roam="$roam"
uci set 4g.modem.lac="$lac"
uci set 4g.modem.reg_net="$reg_net"
uci commit 4g
}
proto_3g_setup() {
#init_4g
local interface="$1"
local chat
json_get_var device device
json_get_var apn apn
json_get_var service service
json_get_var pincode pincode
json_get_var dialnumber dialnumber
[ -n "$dat_device" ] && device=$dat_device
[ -e "$device" ] || {
proto_set_available "$interface" 0
return 1
}
case "$service" in
cdma|evdo)
chat="/etc/chatscripts/evdo.chat"
;;
*)
chat="/etc/chatscripts/3g.chat"
cardinfo=$(gcom -d "$device" -s /etc/gcom/getcardinfo.gcom)
if echo "$cardinfo" | grep -q Novatel; then
case "$service" in
umts_only) CODE=2;;
gprs_only) CODE=1;;
*) CODE=0;;
esac
export MODE="AT\$NWRAT=${CODE},2"
elif echo "$cardinfo" | grep -q Option; then
case "$service" in
umts_only) CODE=1;;
gprs_only) CODE=0;;
*) CODE=3;;
esac
export MODE="AT_OPSYS=${CODE}"
elif echo "$cardinfo" | grep -q "Sierra Wireless"; then
SIERRA=1
elif echo "$cardinfo" | grep -qi huawei; then
case "$service" in
umts_only) CODE="14,2";;
gprs_only) CODE="13,1";;
*) CODE="2,2";;
esac
export MODE="AT^SYSCFG=${CODE},3FFFFFFF,2,4"
fi
if [ -n "$pincode" ]; then
PINCODE="$pincode" gcom -d "$device" -s /etc/gcom/setpin.gcom || {
proto_notify_error "$interface" PIN_FAILED
proto_block_restart "$interface"
return 1
}
fi
[ -n "$MODE" ] && gcom -d "$device" -s /etc/gcom/setmode.gcom
# wait for carrier to avoid firmware stability bugs
[ -n "$SIERRA" ] && {
gcom -d "$device" -s /etc/gcom/getcarrier.gcom || return 1
}
if [ -z "$dialnumber" ]; then
dialnumber="*99***1#"
fi
;;
esac
connect="${apn:+USE_APN=$apn }DIALNUMBER=$dialnumber /usr/sbin/chat -t5 -v -E -f $chat"
ppp_generic_setup "$interface" \
noaccomp \
nopcomp \
novj \
nobsdcomp \
noauth \
lock \
crtscts \
115200 "$device"
return 0
}
proto_3g_teardown() {
proto_kill_command "$interface"
}
[ -z "NOT_INCLUDED" ] || add_protocol 3g
|
link4all/20170920openwrt
|
own_files/mt7628/files_4modem/lib/netifd/proto/3g.sh
|
Shell
|
gpl-2.0
| 3,622 |
# Easier navigation: .., ..., ...., ....., ~ and -
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
alias ~="cd ~" # `cd` is probably faster to type though
alias -- -="cd -"
# Shortcuts
alias d="cd ~/Documents"
alias dl="cd ~/Downloads"
alias dt="cd ~/Desktop"
alias g="git"
alias h="history"
alias j="jobs"
# Detect which `ls` flavor is in use
if ls --color > /dev/null 2>&1; then # GNU `ls`
colorflag="--color"
else # OS X `ls`
colorflag="-G"
fi
# List all files colorized in long format
alias l="ls -lF ${colorflag}"
# List all files colorized in long format, including dot files
alias la="ls -laF ${colorflag}"
# List only directories
alias lsd="ls -lF ${colorflag} | grep --color=never '^d'"
# Always use color output for `ls`
alias ls="ls ${colorflag}"
export LS_COLORS='no=00:fi=00:di=01;34:ln=01;36:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;31:*.bz2=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.avi=01;35:*.fli=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.ogg=01;35:*.mp3=01;35:*.wav=01;35:'
# Enable aliases to be sudo’ed
alias sudo='sudo '
# Get week number
alias week='date +%V'
# Stopwatch
alias timer='echo "Timer started. Stop with Ctrl-D." && date && time cat && date'
# IP addresses
alias ip="dig +short myip.opendns.com @resolver1.opendns.com"
alias localip="ipconfig getifaddr en0"
alias ips="ifconfig -a | grep -o 'inet6\? \(addr:\)\?\s\?\(\(\([0-9]\+\.\)\{3\}[0-9]\+\)\|[a-fA-F0-9:]\+\)' | awk '{ sub(/inet6? (addr:)? ?/, \"\"); print }'"
# Flush Directory Service cache
alias flush="dscacheutil -flushcache && killall -HUP mDNSResponder"
# Clean up LaunchServices to remove duplicates in the “Open With” menu
alias lscleanup="/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister -kill -r -domain local -domain system -domain user && killall Finder"
# View HTTP traffic
alias sniff="sudo ngrep -d 'en1' -t '^(GET|POST) ' 'tcp and port 80'"
alias httpdump="sudo tcpdump -i en1 -n -s 0 -w - | grep -a -o -E \"Host\: .*|GET \/.*\""
# Recursively delete `.DS_Store` files
alias cleanup="find . -type f -name '*.DS_Store' -ls -delete"
# Empty the Trash on all mounted volumes and the main HDD
# Also, clear Apple’s System Logs to improve shell startup speed
alias emptytrash="sudo rm -rfv /Volumes/*/.Trashes; sudo rm -rfv ~/.Trash; sudo rm -rfv /private/var/log/asl/*.asl"
# Show/hide hidden files in Finder
alias show="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder"
alias hide="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder"
# Hide/show all desktop icons (useful when presenting)
alias hidedesktop="defaults write com.apple.finder CreateDesktop -bool false && killall Finder"
alias showdesktop="defaults write com.apple.finder CreateDesktop -bool true && killall Finder"
# URL-encode strings
alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1]);"'
# Ring the terminal bell, and put a badge on Terminal.app’s Dock icon
# (useful when executing time-consuming commands)
alias badge="tput bel"
# Intuitive map function
# For example, to list all directories that contain a certain file:
# find . -name .gitattributes | map dirname
alias map="xargs -n1"
# One of @janmoesen’s ProTip™s
for method in GET HEAD POST PUT DELETE TRACE OPTIONS; do
alias "$method"="lwp-request -m '$method'"
done
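# Usage example: `GET https://example.com/` issues an HTTP GET via lwp-request
# and prints the response body (URL is illustrative).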
# Stuff I never really use but cannot delete either because of http://xkcd.com/530/
alias stfu="osascript -e 'set volume output muted true'"
alias pumpitup="osascript -e 'set volume 7'"
# Reload the shell (i.e. invoke as a login shell)
alias reload="exec $SHELL -l"
|
zhiyisun/dotfiles
|
zsh/aliases.zsh
|
Shell
|
gpl-2.0
| 4,038 |
#!/bin/bash
pylupdate4 menu_builder.pro
linguist fr.ts
|
Oslandia/qgis-menu-builder
|
i18n/update_translation.sh
|
Shell
|
gpl-2.0
| 55 |
#!/bin/bash
NAME="config-fs"
ROOT_KEY="/env/testing"
MOUNT="./mount"
annonce() {
[ -n "$1" ] && echo "** $@"
}
failed() {
annonce "[failed] $@"
exit 1
}
check() {
if [ -n "$1" ]; then
echo -n "check: $2 "
eval "$1 >/dev/null"
if [ $? -ne 0 ]; then
echo "[failed]"
exit 1
fi
echo "[passed]"
fi
}
perform_clean() {
annonce "perform a cleanup"
make clean
[ -d "etcd-v2.0.0-linux-amd64" ] && rm -rf etcd-v2.0.0-linux-amd64/
pkill -9 etcd
pkill -9 config-fs
}
perform_build() {
export PATH=$PATH:${GOPATH}/bin
annonce "performing the compilation of ${NAME}"
go get
go get github.com/tools/godep
make || failed "unable to compile the ${NAME} binary"
gzip stage/config-fs -c > config-fs.gz
}
perform_setup() {
annonce "downloading the etcd service for tests"
if [ ! -f "etcd-v2.0.0-linux-amd64.tar.gz" ]; then
curl -skL https://github.com/coreos/etcd/releases/download/v2.0.0/etcd-v2.0.0-linux-amd64.tar.gz > etcd-v2.0.0-linux-amd64.tar.gz
fi
tar zxf etcd-v2.0.0-linux-amd64.tar.gz
annonce "starting the etcd service"
nohup etcd-v2.0.0-linux-amd64/etcd > etcd.log 2>&1 &
[ $? -ne 0 ] && failed "unable to start the etcd service"
ETCD="etcd-v2.0.0-linux-amd64/etcdctl --peers 127.0.0.1:4001 --no-sync"
annonce "waiting for etcd to startup"
sleep 3
annonce "starting the config-fs service"
$ETCD set ${ROOT_KEY}/created "`date`"
mkdir -p ${MOUNT}
nohup stage/config-fs -store=etcd://127.0.0.1:4001 -root=${ROOT_KEY} -mount=${MOUNT} > test.log 2>&1 &
[ $? -ne 0 ] && failed "unable to start the ${NAME} service"
sleep 3
}
perform_tests() {
annonce "performing the tests"
check "ls -l ${MOUNT}${ROOT_KEY}/created" "has the root key been created"
$ETCD rm ${ROOT_KEY}/created
check "test -d ${MOUNT}${ROOT_KEY}/created || echo -n ''" "has the root keey been delete"
}
perform_clean
perform_build
perform_setup
perform_tests
|
gambol99/config-fs
|
tests/bin/setup.sh
|
Shell
|
gpl-2.0
| 1,927 |
#Compile
if [[ $# = 1 ]]; then
. build/envsetup.sh
if [[ $? = 0 ]]; then
# Use local Java Development Kit 7
if (( $(java -version 2>&1 | grep version | cut -f2 -d".") > 7 )); then
echo "Using local OpenJDK 7..."
export JAVA_HOME=$(realpath ../jdk1.7.0_65);
fi
case $1 in
-u)
lunch myrom_kumquat-eng && make bootimage -j4;
;;
-p)
lunch full_nypon-eng && make bootimage;
;;
-s)
lunch full_pepper-eng && make bootimage;
;;
-g)
lunch full_lotus-eng && make bootimage;
;;
*)
echo "ERROR: Unknow option";
exit -1;
;;
esac
else
echo "ERROR: . build/envsetup.sh falied"
exit -1;
fi
else
echo "ERROR: Number of options not correct. Usage: ./CompileRamdisk.sh -u | -p | -s | -g"
exit -1;
fi
|
myrom/scripts
|
CompileRamdisk.sh
|
Shell
|
gpl-2.0
| 812 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/c_prog_prac_6_linked_list_2
OUTPUT_BASENAME=c_prog_prac_6_linked_list_2
PACKAGE_TOP_DIR=cprogprac6linkedlist2/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/cprogprac6linkedlist2/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/cprogprac6linkedlist2.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/cprogprac6linkedlist2.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
unfitacorn/Practicals-in-CS237-C-and-UNIX
|
Practicals/c prog prac 7 linked list 2/nbproject/Package-Debug.bash
|
Shell
|
gpl-2.0
| 1,539 |
#!/bin/bash
#@author Justin Duplessis
if [ $# -lt 3 ]
then
echo "Usage: $0 user home password";
exit 2
fi
User=$1;
Home=$2;
Password=$3;
useradd "$User" -d "$Home" -s /bin/false -G sftp
echo "$User:$Password" > "/tmp/$User"
chpasswd < "/tmp/$User"
rm "/tmp/$User"
echo "[Script] User $User was created with password $Password";
|
jdupl/Lamp3000
|
src/scripts/createUserWebHost.bash
|
Shell
|
gpl-2.0
| 336 |
#!/bin/sh
# Copyright (C) 2011-2018 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Check that we don't emit harmless but useless code in the generated
# Makefile.in when the project does not use compiled languages. Motivated
# by a regression caused by removal of automatic de-ANSI-fication support:
# <https://lists.gnu.org/archive/html/automake-patches/2011-08/msg00200.html>
. test-init.sh
echo AC_OUTPUT >> configure.ac
: > Makefile.am
# Automake shouldn't need nor require these.
rm -f depcomp compile
$ACLOCAL
# Don't use '--add-missing' here, so that we can implicitly
# check that the auxiliary scripts 'compile' and 'depcomp'
# are not spuriously required by Automake.
$AUTOMAKE
$EGREP 'INCLUDES|@am__isrc@|-compile|\$\(OBJEXT\)|tab\.[ch]' \
Makefile.in && exit 1
:
|
komh/automake-os2
|
t/no-extra-makefile-code.sh
|
Shell
|
gpl-2.0
| 1,395 |
#!/bin/bash
# This script provides the following features. Author: Joey Yang, https://github.com/Code2Life
# 1. Automated installation of python + ansible on Ubuntu/CentOS/Fedora/ArchLinux;
# 2. Clone the kubeasz project code, then download and extract the required binaries into /etc/ansible/bin;
# In addition, the related k8s binaries are mirrored on my personal Qiniu CDN (faster) for easy download: filecdn.code2life.top;
#
# Usage:
# 1. Can be run with an argument, e.g.: ./basic-env-setup.sh k8s.193.tar.gz to select a different kubernetes binary bundle; with no argument it defaults to the latest k8s.1100.tar.gz (k8s 1.10.0 + etcd 3.3.2).
# 2. Can also be run on any supported linux machine with: curl http://filecdn.code2life.top/kubeasz-basic-env-setup.sh | sh -s
# Verified on centos7/ubuntu16.04/debian9/fedora27; the binary packages download very fast. After the script finishes, just configure hosts under /etc/ansible and copy over the ssh public key, then the cluster can be brought up quickly with ansible-playbook.
set -e
# curl http://filecdn.code2life.top/kubeasz-basic-env-setup.sh | sh -s
# Kubernetes 1.10.0 by default
bin_resource_url='http://filecdn.code2life.top/k8s.1100.tar.gz'
# If an argument specifies a k8s binary bundle, use it instead, e.g.: k8s.193.tar.gz
if [ "$1" ];then
bin_resource_url="http://filecdn.code2life.top/"$1
fi
# Install python/pip on each Linux distribution
# ---------------------------
# debian's default apt sources are slow to reach from mainland China; /etc/apt/source.list can be edited by hand to point at another mirror.
# Taking debian 9 as an example, source.list can be changed to the lines below; ubuntu works the same way, just substitute the mirror for your distro and release.
# deb http://mirrors.163.com/debian/ stretch main non-free contrib
# deb http://mirrors.163.com/debian/ stretch-updates main non-free contrib
# deb http://mirrors.163.com/debian/ stretch-backports main non-free contrib
# deb http://mirrors.163.com/debian-security/ stretch/updates main non-free contrib
basic_ubuntu_debian() {
echo "Setup Basic Environment for Ubuntu/Debian."
apt-get update && apt-get upgrade -y && apt-get dist-upgrade -y
apt-get install python2.7 git python-pip curl -y
if [ ! -f /usr/bin/python ];then
ln -s /usr/bin/python2.7 /usr/bin/python
fi
}
# On Red Hat family Linux the yum mirrors can be changed to speed up downloads; just edit the files under /etc/yum.repos.d
basic_centos() {
echo "Setup Basic Environment for CentOS."
yum install epel-release -y
yum update -y
yum erase firewalld firewalld-filesystem python-firewall -y
yum install git python python-pip curl -y
}
basic_fedora() {
echo "Setup Basic Environment for Fedora."
yum update -y
yum install git python python-pip curl -y
}
# archlinux uses pacman for package management
basic_arch() {
pacman -Syu --noconfirm
pacman -S python git python-pip curl --noconfirm
}
# Install ansible with pip and download the k8s binaries
setup_ansible_k8s() {
echo "Download Ansible and Kubernetes binaries."
pip install pip --upgrade -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
pip install --no-cache-dir ansible -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com
git clone https://github.com/gjmzj/kubeasz.git
mv kubeasz /etc/ansible
# Download from CDN & Move bin files
curl -o k8s_download.tar.gz "$bin_resource_url"
tar zxvf k8s_download.tar.gz
mv -f bin/* /etc/ansible/bin
rm -rf bin
echo "Finish setup. Please config your hosts and run 'ansible-playbook' command at /etc/ansible."
}
# ---------------------------
# Detect the Linux distribution and run the matching basic environment setup
# ---------------------------
lsb_dist=''
command_exists() {
command -v "$@" > /dev/null 2>&1
}
if command_exists lsb_release; then
lsb_dist="$(lsb_release -si)"
lsb_version="$(lsb_release -rs)"
fi
if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
lsb_version="$(. /etc/lsb-release && echo "$DISTRIB_RELEASE")"
fi
if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
lsb_dist='debian'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
lsb_dist='fedora'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
lsb_dist="$(. /etc/os-release && echo "$ID")"
fi
if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
lsb_dist="$(cat /etc/*-release | head -n1 | cut -d " " -f1)"
fi
if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
lsb_dist="$(cat /etc/*-release | head -n1 | cut -d " " -f1)"
fi
lsb_dist="$(echo $lsb_dist | cut -d " " -f1)"
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
# ---------------------------
# ---------------------------
setup_env(){
case "$lsb_dist" in
centos)
basic_centos
setup_ansible_k8s
exit 0
;;
fedora)
basic_fedora
setup_ansible_k8s
exit 0
;;
ubuntu)
basic_ubuntu_debian
setup_ansible_k8s
exit 0
;;
debian)
basic_ubuntu_debian
setup_ansible_k8s
exit 0
;;
arch)
basic_arch
setup_ansible_k8s
exit 0
;;
suse)
echo 'Not implemented yet.'
exit 1
esac
echo "Error: Unsupported OS, please set ansible environment manually."
exit 1
}
setup_env
# ---------------------------
|
VoiceAddrBook/VAB
|
tools/basic-env-setup.sh
|
Shell
|
gpl-2.0
| 5,325 |
#!/bin/bash
if [ ! -h cache.lua ]; then
ln -s ../cache.lua
fi
#luajit test_coherence.lua test_coherence.trace
luajit test_coherence.lua date.mref.log
|
zjutoe/cache_sim_lua
|
test/coherence.sh
|
Shell
|
gpl-2.0
| 157 |
#!/bin/sh
DRV_RELEASE="14.501.1003"
##############################################################
# COMMON HEADER: Initialize variables and declare subroutines
BackupInstPath()
{
if [ ! -d /etc/ati ]
then
# /etc/ati is not a directory or doesn't exist so no backup is required
return 0
fi
if [ -n "$1" ]
then
FILE_PREFIX=$1
else
# client did not pass in FILE_PREFIX parameter and /etc/ati exists
return 64
fi
if [ ! -f /etc/ati/$FILE_PREFIX ]
then
return 0
fi
COUNTER=0
ls /etc/ati/$FILE_PREFIX.backup-${COUNTER} > /dev/null 2>&1
RETURN_CODE=$?
while [ 0 -eq $RETURN_CODE ]
do
COUNTER=$((${COUNTER}+1))
ls /etc/ati/$FILE_PREFIX.backup-${COUNTER} > /dev/null 2>&1
RETURN_CODE=$?
done
cp -p /etc/ati/$FILE_PREFIX /etc/ati/$FILE_PREFIX.backup-${COUNTER}
RETURN_CODE=$?
if [ 0 -ne $RETURN_CODE ]
then
# copy failed
return 65
fi
return 0
}
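# Usage sketch (file name is illustrative): BackupInstPath amdpcsdb
# would copy /etc/ati/amdpcsdb to the first free /etc/ati/amdpcsdb.backup-<N>.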
UpdateInitramfs()
{
UPDATE_INITRAMFS=`which update-initramfs 2> /dev/null`
DRACUT=`which dracut 2> /dev/null`
MKINITRD=`which mkinitrd 2> /dev/null`
kernel_release=`uname -r`
kernel_version=`echo $kernel_release | cut -d"." -f 1`
kernel_release_rest=`echo $kernel_release | cut -d"." -f 2`
kernel_major_rev=`echo $kernel_release_rest | cut -d"-" -f 1`
kernel_major_rev=`echo $kernel_major_rev | cut -d"." -f 1`
if [ $kernel_version -gt 2 ]; then
#not used
kernel_minor_rev=0
else
kernel_minor_rev=`echo $kernel_release | cut -d"." -f 3 | cut -d"-" -f 1`
fi
if [ $kernel_version -gt 2 -o \( $kernel_version -eq 2 -a $kernel_major_rev -ge 6 -a $kernel_minor_rev -ge 32 \) ]; then
if [ -n "${UPDATE_INITRAMFS}" -a -x "${UPDATE_INITRAMFS}" ]; then
#update initramfs for current kernel by specifying kernel version
${UPDATE_INITRAMFS} -u -k `uname -r` > /dev/null
#update initramfs for latest kernel (default)
${UPDATE_INITRAMFS} -u > /dev/null
echo "[Reboot] Kernel Module : update-initramfs" >> ${LOG_FILE}
elif [ -n "${DRACUT}" -a -x "${DRACUT}" ]; then
#RedHat/Fedora
${DRACUT} -f > /dev/null
echo "[Reboot] Kernel Module : dracut" >> ${LOG_FILE}
elif [ -n "${MKINITRD}" -a -x "${MKINITRD}" ]; then
#Novell
${MKINITRD} > /dev/null
echo "[Reboot] Kernel Module : mkinitrd" >> ${LOG_FILE}
fi
else
echo "[Message] Kernel Module : update initramfs not required" >> ${LOG_FILE}
fi
}
# i.e., lib for 32-bit and lib64 for 64-bit.
if [ `uname -m` = "x86_64" ];
then
LIB=lib64
else
LIB=lib
fi
# LIB32 always points to the 32-bit libraries (native in 32-bit,
# 32-on-64 in 64-bit) regardless of the system native bitwidth.
# Use lib32 and lib64; if lib32 doesn't exist assume lib is for lib32
if [ -d "/usr/lib32" ]; then
LIB32=lib32
else
LIB32=lib
fi
#process INSTALLPATH, if it's "/" then need to purge it
#SETUP_INSTALLPATH is a Loki Setup environment variable
INSTALLPATH=${SETUP_INSTALLPATH}
if [ "${INSTALLPATH}" = "/" ]
then
INSTALLPATH=""
fi
# project name and derived defines
MODULE=fglrx
IP_LIB_PREFIX=lib${MODULE}_ip
# general purpose paths
XF_BIN=${INSTALLPATH}${ATI_X_BIN}
XF_LIB=${INSTALLPATH}${ATI_XLIB}
OS_MOD=${INSTALLPATH}`dirname ${ATI_KERN_MOD}`
USR_LIB=${INSTALLPATH}/usr/${LIB}
MODULE=`basename ${ATI_KERN_MOD}`
#FGLRX install log
LOG_PATH=${INSTALLPATH}${ATI_LOG}
LOG_FILE=${LOG_PATH}/fglrx-install.log
if [ ! -e ${LOG_PATH} ]
then
mkdir -p ${LOG_PATH} 2>/dev/null
fi
if [ ! -e ${LOG_FILE} ]
then
touch ${LOG_FILE}
fi
#DKMS version
DKMS_VER=`dkms -V 2> /dev/null | cut -d " " -f2`
#DKMS expects kernel module sources to be placed under this directory
DKMS_KM_SOURCE=/usr/src/${MODULE}-${DRV_RELEASE}
# END OF COMMON HEADER
#######################
###Begin: post_drv1 ###
# cover SuSE special case...
if [ `ls -1 ${INSTALLPATH}/usr/X11R6/bin/switch2* 2>/dev/null | grep "" -c 2>/dev/null` -gt 0 ]
then
if [ -e ${INSTALLPATH}/usr/X11R6/bin/switch2xf86-4 ]
then
${INSTALLPATH}/usr/X11R6/bin/switch2xf86-4
fi
if [ -e ${INSTALLPATH}/usr/X11R6/bin/switch2xf86_glx ]
then
echo "[Warning] Driver : swiching OpenGL library support to XFree86 4.x.x DRI method" >> ${LOG_FILE}
else
echo "[Warning] Driver : can't switch OpenGL library support to XFree86 4.x.x DRI method" >> ${LOG_FILE}
echo "[Warning] : because package xf86_glx-4.*.i386.rpm is not installed." >> ${LOG_FILE}
echo "[Warning] : please install and run switch2xf86_glx afterwards." >> ${LOG_FILE}
fi
fi
GLDRISEARCHPATH=${INSTALLPATH}${ATI_3D_DRV_32}
LDLIBSEARCHPATHX=${INSTALLPATH}${ATI_XLIB_32}
if [ -n "${ATI_XLIB_64}" -a -n "${ATI_3D_DRV_64}" ]
then
GLDRISEARCHPATH=${GLDRISEARCHPATH}:${INSTALLPATH}${ATI_3D_DRV_64}
LDLIBSEARCHPATHX=${LDLIBSEARCHPATHX}:${INSTALLPATH}${ATI_XLIB_64}
fi
# set environment variable LD_LIBRARY_PATH
# add ATI_PROFILE script located in
# - /etc/profile.d if dir exists, else
# - /etc/ati and add a line in /etc/profile for sourcing
ATI_PROFILE_START="### START ATI FGLRX ###"
ATI_PROFILE_END="### END ATI FGLRX ###"
ATI_PROFILE_FNAME="ati-fglrx"
ATI_PROFILE="### START ATI FGLRX ###
### Automatically modified by ATI Proprietary driver scripts
### Please do not modify between START ATI FGLRX and END ATI FGLRX
#setting LD_LIBRARY_PATH is not required for ATI FGLRX
#if [ \$LD_LIBRARY_PATH ]
#then
# if ! set | grep LD_LIBRARY_PATH | grep ${LDLIBSEARCHPATHX} > /dev/null
# then
#LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:${LDLIBSEARCHPATHX}
#export LD_LIBRARY_PATH
# fi
#else
#LD_LIBRARY_PATH=${LDLIBSEARCHPATHX}
#export LD_LIBRARY_PATH
#fi
if [ \$LIBGL_DRIVERS_PATH ]
then
if ! set | grep LIBGL_DRIVERS_PATH | grep ${GLDRISEARCHPATH} > /dev/null
then
LIBGL_DRIVERS_PATH=\$LIBGL_DRIVERS_PATH:${GLDRISEARCHPATH}
export LIBGL_DRIVERS_PATH
fi
else
LIBGL_DRIVERS_PATH=${GLDRISEARCHPATH}
export LIBGL_DRIVERS_PATH
fi
### END ATI FGLRX ###
"
# replaces any previous script if existing
ATI_PROFILE_FILE1="/etc/profile.d/${ATI_PROFILE_FNAME}.sh"
ATI_PROFILE_FILE2="/etc/ati/${ATI_PROFILE_FNAME}.sh"
if [ -d `dirname ${ATI_PROFILE_FILE1}` ];
then
printf "${ATI_PROFILE}" > ${ATI_PROFILE_FILE1}
chmod +x ${ATI_PROFILE_FILE1}
elif [ -d `dirname ${ATI_PROFILE_FILE2}` ];
then
printf "${ATI_PROFILE}" > ${ATI_PROFILE_FILE2}
chmod +x ${ATI_PROFILE_FILE2}
PROFILE_COMMENT=" # Do not modify - set by ATI FGLRX"
PROFILE_LINE="\. /etc/ati/${ATI_PROFILE_FNAME}\.sh ${PROFILE_COMMENT}"
if ! grep -e "${PROFILE_LINE}" /etc/profile > /dev/null
then
PROFILE_LINE=". ${ATI_PROFILE_FILE2} ${PROFILE_COMMENT}"
printf "${PROFILE_LINE}\n" >> /etc/profile
fi
fi
#create user profile with write access if user profile does not exist
#or running with force, without preserve
if [ ! -f "${ATI_CONFIG}/atiapfuser.blb" -o "${FORCE_ATI_UNINSTALL}" = "y" ]; then
rm -f "${ATI_CONFIG}/atiapfuser.blb"
touch "${ATI_CONFIG}/atiapfuser.blb"
chmod a+w "${ATI_CONFIG}/atiapfuser.blb"
fi
###End: post_drv1 ###
###Begin: post_drv2 ###
# manage lib dir contents
XF_BIN=${INSTALLPATH}${ATI_X_BIN}
XF_LIB=${INSTALLPATH}${ATI_XLIB}
XF_LIB32=${INSTALLPATH}${ATI_XLIB_32}
XF_LIB_EXT=${INSTALLPATH}${ATI_X_MODULE}/extensions
XF_LIB_EXT32=${INSTALLPATH}${ATI_XLIB_EXT_32}
USR_LIB=${INSTALLPATH}/usr/${LIB}
USR_LIB32=${INSTALLPATH}/usr/${LIB32}
# cleanup standard symlinks
rm -f $XF_LIB/libGL.so
rm -f $XF_LIB/libGL.so.1
rm -f $USR_LIB/libGL.so
rm -f $USR_LIB/libGL.so.1
# create standard symlinks
# *** NOTICE *** #
# If our libGL.so.1.2 changes version, or the GL libraries
# change, this code becomes obsolete.
ln -s $XF_LIB/fglrx/fglrx-libGL.so.1.2 $XF_LIB/libGL.so.1.2
ln -s $XF_LIB/libGL.so.1.2 $XF_LIB/libGL.so.1
ln -s $XF_LIB/libGL.so.1 $XF_LIB/libGL.so
#MM creation of sym links
if [ -d $XF_LIB/dri/ ]; then
ln -s $XF_LIB/libXvBAW.so.1.0 $XF_LIB/dri/fglrx_drv_video.so
fi
if [ "${XF_LIB}" != "${USR_LIB}" ]; then
ln -s $XF_LIB/fglrx/fglrx-libGL.so.1.2 $USR_LIB/libGL.so.1.2
ln -s $USR_LIB/libGL.so.1.2 $USR_LIB/libGL.so.1
ln -s $USR_LIB/libGL.so.1 $USR_LIB/libGL.so
if [ -d $USR_LIB/dri/ ]; then
ln -s $USR_LIB/libXvBAW.so.1.0 $USR_LIB/dri/fglrx_drv_video.so
fi
fi
#Create proper sym link to avoid conflict with libglx.so
if [ -e $XF_LIB_EXT/fglrx/fglrx-libglx.so ]; then
ln -s $XF_LIB_EXT/fglrx/fglrx-libglx.so $XF_LIB_EXT/libglx.so
fi
# cleanup/create symlinks for 32-on-64 only if needed
if [ "$LIB" != "$LIB32" ];
then
rm -f $XF_LIB32/libGL.so
rm -f $XF_LIB32/libGL.so.1
rm -f $USR_LIB32/libGL.so
rm -f $USR_LIB32/libGL.so.1
# *** NOTICE *** #
# If our libGL.so.1.2 changes version, or the GL libraries
# change, this code becomes obsolete.
ln -s $XF_LIB32/fglrx/fglrx-libGL.so.1.2 $XF_LIB32/libGL.so.1.2
ln -s $XF_LIB32/libGL.so.1.2 $XF_LIB32/libGL.so.1
ln -s $XF_LIB32/libGL.so.1 $XF_LIB32/libGL.so
if [ "${XF_LIB32}" != "${USR_LIB32}" ]; then
ln -s $XF_LIB32/fglrx/fglrx-libGL.so.1.2 $USR_LIB32/libGL.so.1.2
ln -s $USR_LIB32/libGL.so.1.2 $USR_LIB32/libGL.so.1
ln -s $USR_LIB32/libGL.so.1 $USR_LIB32/libGL.so
fi
#Create proper sym link to avoid conflict with libglx from Xorg package
if [ -e $XF_LIB_EXT32/fglrx/fglrx-libglx.so ]; then
ln -s $XF_LIB_EXT32/fglrx/fglrx-libglx.so $XF_LIB_EXT32/libglx.so
fi
fi
#MM creation on UB systems
##ToDO:this can be avoided by having proper global variable
if [ `uname -m` = "x86_64" -a \
-d "/usr/lib/x86_64-linux-gnu" ];
then
ln -s /usr/lib/libXvBAW.so.1.0 /usr/lib/x86_64-linux-gnu/dri/fglrx_drv_video.so
elif [ -d "/usr/lib/i386-linux-gnu" ];
then
ln -s /usr/lib/libXvBAW.so.1.0 /usr/lib/i386-linux-gnu/dri/fglrx_drv_video.so
fi
#try to fixup the glx/GL alternative after symlinks created
DisString=`lsb_release -i`
DID=`echo $DisString | awk '{ print $3 }'`
glxSlave="/usr/lib/xorg/modules/linux/libglx.so glx--linux-libglx.so /usr/lib/xorg/modules/extensions/fglrx/fglrx-libglx.so"
i386GLSlave="/usr/lib/i386-linux-gnu/libGL.so.1 glx--libGL.so.1-i386-linux-gnu /usr/lib/i386-linux-gnu/fglrx/fglrx-libGL.so.1.2"
x8664GLSlave="/usr/lib/x86_64-linux-gnu/libGL.so.1 glx--libGL.so.1-x86_64-linux-gnu /usr/lib/fglrx/fglrx-libGL.so.1.2"
if [ "$DID" = "SteamOS" ]
then
update-alternatives --install /usr/lib/glx glx /usr/lib/fglrx 99 --slave $glxSlave --slave $i386GLSlave --slave $x8664GLSlave > /dev/null
update-alternatives --set glx /usr/lib/fglrx > /dev/null
fi
#for those systems that don't look
/sbin/ldconfig -n ${XF_LIB}
#not really needed? (only libGL, which was manually linked above)
if [ "${LIB}" != "${LIB32}" ]; then
/sbin/ldconfig -n ${XF_LIB32}
fi
# rebuild any remaining library symlinks
/sbin/ldconfig
#set sticky bit for amd-console-helper
chmod a+s $XF_BIN/amd-console-helper
#reset driver version in database
ATICONFIG_BIN=`which aticonfig 2> /dev/null`
if [ -n "${ATICONFIG_BIN}" -a -x "${ATICONFIG_BIN}" ]; then
${ATICONFIG_BIN} --del-pcs-key=LDC,ReleaseVersion > /dev/null 2>&1
${ATICONFIG_BIN} --del-pcs-key=LDC,Catalyst_Version > /dev/null 2>&1
fi
###End: post_drv2 ###
exit 0
|
Scorpio92/linux_kernel_3.18.5
|
drv/post_drv.sh
|
Shell
|
gpl-2.0
| 11,423 |
#!/bin/bash
# make gene example
../../../bin/spimap \
-a 100.nt.align \
-s ../../../examples/config/fungi.stree \
-S ../../../examples/config/fungi.smap \
-p ../../../examples/train/fungi.params \
-o 100 \
-D 0.000564 \
-L 0.003056 \
-i 100 \
--quickiter 1000 \
-V 1 --log -
cat > 100.yeast.tree <<EOF
(
(
(
(
(
(
(
YER061C:0.065684,
spar_6281:0.059258
)n9:0.024319,
smik_6662:0.103443
)n8:0.016745,
sbay_7039:0.089961
)n7:0.005255,
(
smik_6659:0.092338,
sbay_7037:0.127706
)n10:0.014401
)n6:0.180075,
CAGL0J02970g:0.290991
)n5:0.095828,
scas_g715.48:0.348032
)n4:0.071532,
(
kwal_5828:0.302079,
(
ADL072C:0.365623,
KLLA0C08239g:0.460869
)n3:0.116995
)n2:0.054779
)n1;
EOF
~/projects/dlcoal/bin/mpr \
-s ../../../examples/config/fungi.stree \
-S ../../../examples/config/fungi.smap \
-I .tree -O .mpr 100.yeast.tree
tree-relations -S ../../../examples/config/fungi.smap -s ../../../examples/config/fungi.stree -R .mpr.recon -T .tree 100.yeast.tree
#=============================================================================
# not needed
viewtree -g 100.yeast.tree.svg \
-l 350 -n \
-S ../../../examples/config/fungi.smap \
-s ../../../examples/config/fungi.stree \
100.yeast.tree
convert 100.yeast.tree.svg 100.yeast.tree.png
|
mdrasmus/spimap
|
doc/figures/gene-tree/make.sh
|
Shell
|
gpl-2.0
| 1,578 |
#! /bin/sh
#
# This is kernel build script for debian lenny's 2.6.26 kernel.
#
die () {
echo $1
exit 1
}
generate_meta_package() {
[ -r $1 ] || die "Can't find $1 ."
dpkg-deb -x $1 tmp
dpkg-deb -e $1 tmp/DEBIAN
dir=`echo -n tmp/usr/share/doc/*`
mv ${dir} ${dir}-ccs
sed -i -e 's:-686:-686-ccs:' -- tmp/DEBIAN/md5sums tmp/DEBIAN/control
dpkg-deb -b tmp && mv tmp.deb $2
rm -fR tmp
}
export CONCURRENCY_LEVEL=`grep -c '^processor' /proc/cpuinfo` || die "Can't export."
apt-get -y install wget
for key in 19A42D19 9B441EA8
do
gpg --list-keys $key 2> /dev/null > /dev/null || wget -O - 'http://pgp.nic.ad.jp/pks/lookup?op=get&search=0x'$key | gpg --import || die "Can't import PGP key."
done
# Download TOMOYO Linux patches.
mkdir -p /usr/src/rpm/SOURCES/
cd /usr/src/rpm/SOURCES/ || die "Can't chdir to /usr/src/rpm/SOURCES/ ."
if [ ! -r ccs-patch-1.7.2-20110121.tar.gz ]
then
wget -O ccs-patch-1.7.2-20110121.tar.gz 'http://sourceforge.jp/frs/redir.php?f=/tomoyo/43375/ccs-patch-1.7.2-20110121.tar.gz' || die "Can't download patch."
fi
# Install kernel source packages.
cd /usr/src/ || die "Can't chdir to /usr/src/ ."
apt-get install fakeroot build-essential || die "Can't install packages."
apt-get build-dep linux-image-2.6.26-2-686 || die "Can't install packages."
apt-get source linux-image-2.6.26-2-686 || die "Can't install kernel source."
# Apply patches and create kernel config.
cd linux-2.6-2.6.26 || die "Can't chdir to linux-2.6-2.6.26/ ."
tar -zxf /usr/src/rpm/SOURCES/ccs-patch-1.7.2-20110121.tar.gz || die "Can't extract patch."
patch -p1 < patches/ccs-patch-2.6.26-debian-lenny.diff || die "Can't apply patch."
cat /boot/config-2.6.26-2-686 config.ccs > .config || die "Can't create config."
make -s oldconfig
# Start compilation.
REVISION=`head -n 1 debian/changelog | awk ' { print $2 } ' | awk -F'(' ' { print $2 } ' | awk -F')' ' { print $1 } '`
make-kpkg --append-to-version -2-686-ccs --initrd --revision $REVISION linux-image || die "Failed to build kernel package."
# Generate meta packages.
wget http://ftp.jp.debian.org/debian/pool/main/l/linux-latest-2.6/linux-image-2.6-686_2.6.26+17+lenny1_i386.deb
generate_meta_package linux-image-2.6-686_2.6.26+17+lenny1_i386.deb linux-image-2.6-686-ccs_2.6.26+17+lenny1_i386.deb
exit 0
|
renaudallard/kernel-froyo-GT-I9000
|
specs/build-debian_lenny.sh
|
Shell
|
gpl-2.0
| 2,309 |
#!/bin/bash
#CACHE_DIR=/usr/local/vufind/httpd/local/cache
#if [ "$UID" -ne 0 ]; then
# echo "you have to be root to use the git update script because cache will be cleared"
# exit 1
#fi
BASEDIR=$(dirname $0)
INDEX="$BASEDIR/../public/index.php"
if [ -z "$LOCAL_DIR" ]; # if $LOCAL_DIR empty or unset, use default localdir
then export VUFIND_LOCAL_DIR=${BASEDIR}/../local;
else export VUFIND_LOCAL_DIR=$LOCAL_DIR;
fi
export VUFIND_LOCAL_MODULES=Swissbib
export VUFIND_LOCAL_DIR
#export APPLICATION_ENV=development
php $INDEX tab40import $@
#su -c "php $INDEX tab40import $@" vfsb
#please do not delete a directory with options -rf as root based on a relative directory! GH
#rm -rf $CACHE_DIR/*
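# Usage sketch (paths are assumptions, not from upstream docs):
# bash cli/tab40import.sh /path/to/tab40-file
# or override the local dir first:
# LOCAL_DIR=/usr/local/vufind/local bash cli/tab40import.sh /path/to/tab40-file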
|
swissbib/vufind
|
cli/tab40import.sh
|
Shell
|
gpl-2.0
| 724 |
#!/bin/sh
. $TESTSLIB/systemd.sh
wait_for_ssh(){
retry=300
while ! execute_remote true; do
retry=$(( retry - 1 ))
if [ $retry -le 0 ]; then
echo "Timed out waiting for ssh. Aborting!"
exit 1
fi
sleep 1
done
}
prepare_ssh(){
execute_remote "sudo adduser --extrausers --quiet --disabled-password --gecos '' test"
execute_remote "echo test:ubuntu | sudo chpasswd"
execute_remote "echo 'test ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/test-user"
}
create_assertions_disk(){
dd if=/dev/null of=assertions.disk bs=1M seek=1
mkfs.ext4 -F assertions.disk
mkdir /mnt/assertions
mount -t ext4 -o loop assertions.disk /mnt/assertions
cp $TESTSLIB/assertions/auto-import.assert /mnt/assertions
umount /mnt/assertions && rm -rf /mnt/assertions
}
create_nested_core_vm(){
# determine arch related vars
case "$NESTED_ARCH" in
amd64)
QEMU="$(which qemu-system-x86_64)"
;;
i386)
QEMU="$(which qemu-system-i386)"
;;
*)
echo "unsupported architecture"
exit 1
;;
esac
# create ubuntu-core image
mkdir -p /tmp/work-dir
/snap/bin/ubuntu-image --image-size 3G $TESTSLIB/assertions/nested-${NESTED_ARCH}.model --channel $CORE_CHANNEL --output ubuntu-core.img
mv ubuntu-core.img /tmp/work-dir
create_assertions_disk
systemd_create_and_start_unit nested-vm "${QEMU} -m 1024 -nographic -net nic,model=virtio -net user,hostfwd=tcp::8022-:22 -drive file=/tmp/work-dir/ubuntu-core.img,if=virtio,cache=none -drive file=${PWD}/assertions.disk,if=virtio,cache=none"
wait_for_ssh
prepare_ssh
}
destroy_nested_core_vm(){
systemd_stop_and_destroy_unit nested-vm
rm -rf /tmp/work-dir
}
execute_remote(){
sshpass -p ubuntu ssh -p 8022 -q -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no user1@localhost "$*"
}
|
morphis/snapd
|
tests/lib/nested.sh
|
Shell
|
gpl-3.0
| 1,949 |
#!/bin/bash
# This file includes example uses for EnsembleRNA. You can test them by copy and pasting them into
# the command line or by running this script. These are only a few examples to help you get started.
# Try mixing and matching the different commands to personalize your visualization.
#######################################################################################################
# Show the help for EnsembleRNA
#######################################################################################################
ensemblerna -h
#######################################################################################################
# Run with reference fasta file (map will be created from the reference sequence)
#######################################################################################################
ensemblerna ref_WT.fa example_output
#######################################################################################################
# Run with reference fasta file (map will be created from the reference sequence)
# Ex. Guide the structure prediction using SHAPE data
#######################################################################################################
ensemblerna ref_WT.fa example_output -sh ref_WT.shape
#######################################################################################################
# Run with reference fasta file and map fasta file (map will be created from the map sequence)
# Ex. Use the wild-type to create a single map and compare different variants as references
#######################################################################################################
ensemblerna ref_WT.fa example_output
ensemblerna ref_MT.fa example_output -m ref_WT.fa
#######################################################################################################
# Run with reference db file (map will be created from reference sequence)
# Ex. Visualize an ensemble of structures generated using a different sampling algorithm
#######################################################################################################
ensemblerna ref_WT.fa example_output -d ref_WT.db
#######################################################################################################
# Run with reference fasta file and map db file (map will be created from dot-bracket structures)
# Ex. Quickly compare mutants without recreating the map dot-bracket each time
#######################################################################################################
ensemblerna ref_WT.fa example_output
ensemblerna ref_MT.fa example_output -md example_output/ref_WT_map.db
#######################################################################################################
# Run with reference fasta file (map will be created from reference sequence)
# Ex. Increase the structural space explored by the map for longer RNAs
#######################################################################################################
ensemblerna ref_WT.fa example_output/ -s 15
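#######################################################################################################
# Hypothetical combination (not one of the stock examples): guide the structure prediction with
# SHAPE data while also widening the sampled structural space
#######################################################################################################
ensemblerna ref_WT.fa example_output -sh ref_WT.shape -s 15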
|
cbtolson/ensemblerna_package
|
examples/example.sh
|
Shell
|
gpl-3.0
| 3,093 |
#!/bin/sh
# Ensure all javascript files staged for commit pass standard code style
git diff --name-only --cached --relative | grep '\.jsx\?$' | xargs standard
if [ $? -ne 0 ]; then exit 1; fi
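# To activate this hook, link it into the repo's hooks dir (path is an assumption):
# ln -s ../../.pre-commit.sh .git/hooks/pre-commit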
|
icymind/VRouter
|
.pre-commit.sh
|
Shell
|
gpl-3.0
| 192 |
#!/bin/bash
set -x
NN=4 # number of processors
#pismdir=../../builddev
pismdir=../../buildtill
#for MM in 11 21 41 81 161 321 641;
for MM in 11 21 41;
do
rm -f foo.txt
#./runTestP.py $pismdir "mpiexec -n ${NN}" $MM &> foo.txt
time ./runTestP.py --pism_path=$pismdir --mpiexec="mpiexec -n ${NN}" --Mx=$MM --keep &> runP$MM.txt
echo "results for Mx=My=${MM}:"
cat runP$MM.txt |grep "Drift in"
done
|
talbrecht/pism_pik06
|
test/test_hydrology/verifTestP.sh
|
Shell
|
gpl-3.0
| 411 |
#!/bin/bash
for i in `ls data/*.csv`; do
soubor=`echo "$i" | sed "s/\.csv//" | sed "s/data\///"`
cp data/$soubor.csv csv/$soubor.csv
gawk -v regexp='Ne .* 00:00:00' -f prevod.awk "data/$soubor.csv" > csv/$soubor-tydny.csv
gawk -v regexp='00:00:00' -f prevod.awk "data/$soubor.csv" > csv/$soubor-dny.csv
gawk -v regexp=' 01 00:00:00' -f prevod.awk "data/$soubor.csv" > csv/$soubor-mesice.csv
gawk -v regexp='led 01 00:00:00' -f prevod.awk "data/$soubor.csv" > csv/$soubor-roky.csv
done
|
PetrDlouhy/scitace
|
make_csv.sh
|
Shell
|
gpl-3.0
| 505 |
#!/bin/bash
# Lidarr configuration for nginx
# Author: liara
# Copyright (C) 2019 Swizzin
# Licensed under GNU General Public License v3.0 GPL-3 (in short)
#
# You may copy, distribute and modify the software as long as you track
# changes/dates in source files. Any modifications to our software
# including (via compiler) GPL-licensed code must also be made available
# under the GPL along with build & install instructions.
user=$(cut -d: -f1 < /root/.master.info)
isactive=$(systemctl is-active lidarr)
if [[ $isactive == "active" ]]; then
systemctl stop lidarr
fi
cat > /etc/nginx/apps/lidarr.conf <<LIDN
location /lidarr {
proxy_pass http://127.0.0.1:8686/lidarr;
proxy_set_header Host \$proxy_host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_redirect off;
auth_basic "What's the password?";
auth_basic_user_file /etc/htpasswd.d/htpasswd.${user};
}
LIDN
if [[ ! -d /home/${user}/.config/Lidarr/ ]]; then mkdir -p /home/${user}/.config/Lidarr/; fi
cat > /home/${user}/.config/Lidarr/config.xml <<LID
<Config>
<Port>8686</Port>
<UrlBase>lidarr</UrlBase>
<BindAddress>127.0.0.1</BindAddress>
<EnableSsl>False</EnableSsl>
<LogLevel>Info</LogLevel>
<LaunchBrowser>False</LaunchBrowser>
</Config>
LID
chown -R ${user}: /home/${user}/.config
if [[ $isactive == "active" ]]; then
systemctl start lidarr
fi
|
liaralabs/swizzin
|
scripts/nginx/lidarr.sh
|
Shell
|
gpl-3.0
| 1,425 |
#!/bin/bash
#
# tools/init_tools.sh
#
# Copyright (c) 2017-2018 Decorum Development Team <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
get_crosstool_ng() {
cd $DIR/tools/src
addr="https://github.com/crosstool-ng/crosstool-ng"
echo "Checking crosstool-ng-repo is alive.."
x=$(check_connect $addr)
if [ $x == "ok" ]; then
git clone $addr
cd crosstool-ng
git checkout origin/master
else
connect_err "crosstool-ng repo"
fi
}
build_crosstool_ng() {
cd $DIR/tools/src/crosstool-ng
./bootstrap
./configure --prefix=$DIR/tools/
make
make install
}
get_sqlite() {
cd $DIR/tools/src/
addr="https://www.sqlite.org/src/tarball/sqlite.tar.gz?r=release"
echo "Checking sqlite is alive.."
x=$(check_connect $addr)
if [ $x == "ok" ]; then
wget -O sqlite.tar.xz $addr
tar -xf sqlite.tar.xz
rm sqlite.tar.xz
else
connect_err "sqlite tar mirror"
fi
}
build_sqlite() {
cd $DIR/tools/src/sqlite
}
get_shyapa() {
echo ""
}
get_tools() {
mkdir $DIR/tools/src
get_crosstool_ng
get_sqlite
get_shyapa
}
build_tools() {
build_crosstool_ng
}
install_tools() {
clear
rm -rf $DIR/tools
mkdir $DIR/tools
get_tools
#build_tools
echo "Tools installed. Press any key to continue."
read -n 1 -s
}
|
p01arst0rm/decorum-linux
|
_include/mk/mk_tools.sh
|
Shell
|
gpl-3.0
| 1,913 |
#! /bin/bash
# A modification of Dean Clatworthy's deploy script as found here: https://github.com/deanc/wordpress-plugin-git-svn
# The difference is that this script lives in the plugin's git repo & doesn't require an existing SVN repo.
# main config
PLUGINSLUG="reset-slugs"
CURRENTDIR=`pwd`
MAINFILE="wp-reset-slug.php" # this should be the name of your main php file in the wordpress plugin
# git config
GITPATH="$CURRENTDIR/" # this file should be in the base of your git repository
# svn config
SVNPATH="/tmp/$PLUGINSLUG" # path to a temp SVN repo. No trailing slash required and don't add trunk.
SVNURL="http://plugins.svn.wordpress.org/$PLUGINSLUG/" # Remote SVN repo on wordpress.org, with no trailing slash
SVNUSER="codearachnid" # your svn username
# Let's begin...
echo ".........................................."
echo
echo "Preparing to deploy wordpress plugin"
echo
echo ".........................................."
echo
# Check version in readme.txt is the same as plugin file after translating both to unix line breaks to work around grep's failure to identify mac line breaks
NEWVERSION1=`grep "^Stable tag:" $GITPATH/readme.txt | awk -F' ' '{print $NF}'`
echo "readme.txt version: $NEWVERSION1"
echo "$GITPATH$MAINFILE"
NEWVERSION2=`grep "Version:" $GITPATH$MAINFILE | awk -F' ' '{print $NF}'`
echo "$MAINFILE version: $NEWVERSION2"
if [ "$NEWVERSION1" -ne "$NEWVERSION2" ]; then echo "Version in readme.txt & $MAINFILE don't match. Exiting...."; exit 1; fi
echo "Versions match in readme.txt and $MAINFILE. Let's proceed..."
if git show-ref --tags --quiet --verify -- "refs/tags/$NEWVERSION1"
then
echo "Version $NEWVERSION1 already exists as git tag. Exiting....";
exit 1;
else
echo "Git version does not exist. Let's proceed..."
fi
cd $GITPATH
echo -e "Enter a commit message for this new version: \c"
read COMMITMSG
git commit -am "$COMMITMSG"
echo "Tagging new version in git"
git tag -a "$NEWVERSION1" -m "Tagging version $NEWVERSION1"
echo "Pushing latest commit to origin, with tags"
git push origin master
git push origin master --tags
echo
echo "Creating local copy of SVN repo ..."
svn co $SVNURL $SVNPATH
echo "Exporting the HEAD of master from git to the trunk of SVN"
git checkout-index -a -f --prefix=$SVNPATH/trunk/
echo "Ignoring github specific files and deployment script"
svn propset svn:ignore "deploy.sh
README.md
.git
.gitignore" "$SVNPATH/trunk/"
echo "Changing directory to SVN and committing to trunk"
cd $SVNPATH/trunk/
# Add all new files that are not set to be ignored
svn status | grep -v "^.[ \t]*\..*" | grep "^?" | awk '{print $2}' | xargs svn add
svn commit --username=$SVNUSER -m "$COMMITMSG"
echo "Creating new SVN tag & committing it"
cd $SVNPATH
svn copy trunk/ tags/$NEWVERSION1/
cd $SVNPATH/tags/$NEWVERSION1
svn commit --username=$SVNUSER -m "Tagging version $NEWVERSION1"
echo "Removing temporary directory $SVNPATH"
rm -fr $SVNPATH/
echo "*** FIN ***"
|
codearachnid/wp-reset-slug
|
deploy.sh
|
Shell
|
gpl-3.0
| 2,971 |
#!/bin/bash
# -*- coding: utf-8 -*-
# Executes ddrescue for Linux when requested for DDRescue-GUI Version 2.0.0.
# This file is part of DDRescue-GUI.
# Copyright (C) 2013-2018 Hamish McIntyre-Bhatty
# DDRescue-GUI is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 or,
# at your option, any later version.
#
# DDRescue-GUI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DDRescue-GUI. If not, see <http://www.gnu.org/licenses/>.
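#
# Example invocation (hypothetical arguments; the GUI normally builds this
# command line itself):
# runasroot_linux_ddrescue.sh ddrescue -v /dev/sdb /mnt/backup/disk.img /mnt/backup/disk.map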
#Keep processes' stderr by redirecting it to stdout.
"$@" 2>&1
exit $?
|
hamishmb/ddrescue-gui
|
Tools/helpers/runasroot_linux_ddrescue.sh
|
Shell
|
gpl-3.0
| 831 |
#!/bin/bash
FONT_D=/usr/local/share/fonts
# Fixedsys 3
wget http://www.fixedsysexcelsior.com/fonts/FSEX300.ttf
if [[ ! -e ${FONT_D} ]]; then
sudo mkdir ${FONT_D}
fi
sudo mv -vf *.ttf ${FONT_D}
sudo chmod 775 -R ${FONT_D}
sudo fc-cache -fv
|
huyht1205/scripts
|
automation/font/install_font.sh
|
Shell
|
gpl-3.0
| 243 |
binutils=binutils:arc-2021.03-rc2
gcc=gcc:arc-2021.03-rc2
gdb=gdb:arc-2021.03-rc2-gdb
newlib=newlib:arc-2021.03-rc2
glibc=glibc:glibc-2.33
uclibc=uclibc-ng:v1.0.34
linux=linux:v5.1
|
foss-for-synopsys-dwc-arc-processors/toolchain
|
config/arc-2021.03-rc2.sh
|
Shell
|
gpl-3.0
| 181 |
:
# $APPASERVER_HOME/src_predictive/ledger_no_journal_delete.sh
# -----------------------------------------------------------
if [ "$APPASERVER_DATABASE" != "" ]
then
application=$APPASERVER_DATABASE
elif [ "$DATABASE" != "" ]
then
application=$DATABASE
fi
if [ "$application" = "" ]
then
echo "Error in `basename.e $0 n`: you must first:" 1>&2
echo "\$ . set_database" 1>&2
exit 1
fi
# Input
# -----
if [ "$#" -ne 1 ]
then
echo "Usage: $0 begin_date" 1>&2
exit 1
fi
begin_date=$1
table=transaction
field=full_name,street_address,transaction_date_time
ledger_debit_credit_audit "$begin_date" |
grep 'no_journal$' |
delete_statement table=$table field=$field delimiter='^' |
cat
exit 0
|
timhriley/appaserver
|
src_predictive/ledger_no_journal_delete.sh
|
Shell
|
gpl-3.0
| 708 |
#!/bin/bash
#
P_MODE=$1;
P_FILES="travis.files-status.before travis.directories-status.before travis.files-status.after travis.directories-status.after";
P_PLACES="site modules cache/system";
for f in $P_FILES; do
touch $f;
done;
if [ "$P_MODE" == "before" ] || [ "$P_MODE" == "after" ]; then
find $P_PLACES -type f > travis.files-status.${P_MODE};
find $P_PLACES -type d > travis.directories-status.${P_MODE};
elif [ "$P_MODE" == "compare" ]; then
padding='BEGIN{x=0}/^(<|>)/{x++;print "\t"$0}END{if(x==0){print "\t\033[1;32mNo difference\033[0m"}}';
echo "Comparing files 'before' <-> 'after':";
diff travis.files-status.before travis.files-status.after | awk "$padding";
echo "Comparing directories 'before' <-> 'after':";
diff travis.directories-status.before travis.directories-status.after | awk "$padding";
else
echo "Unknown command, only 'before', 'after' and 'compare' are available.";
fi;
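# Typical sequence (assumed usage, inferred from the modes above):
# ./travis.save-files-status.sh before
# ... run the test suite ...
# ./travis.save-files-status.sh after
# ./travis.save-files-status.sh compare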
|
daemonraco/toobasic
|
tests/assets/travis.save-files-status.sh
|
Shell
|
gpl-3.0
| 911 |
cd python
sudo mongod &
python3 tests.py || python tests.py
|
HMRecord/website
|
runTests.sh
|
Shell
|
gpl-3.0
| 61 |
ldapwhoami | sed 's/dn://' | sed 's/ou=paris,//'
|
randrini/42Pool
|
j00/ex04/who_am_i.sh
|
Shell
|
gpl-3.0
| 49 |
#!/bin/bash
# OpenRA packaging script for Linux (AppImage)
set -e
command -v make >/dev/null 2>&1 || { echo >&2 "Linux packaging requires make."; exit 1; }
command -v python >/dev/null 2>&1 || { echo >&2 "Linux packaging requires python."; exit 1; }
command -v tar >/dev/null 2>&1 || { echo >&2 "Linux packaging requires tar."; exit 1; }
command -v curl >/dev/null 2>&1 || command -v wget > /dev/null 2>&1 || { echo >&2 "Linux packaging requires curl or wget."; exit 1; }
DEPENDENCIES_TAG="20180723"
if [ $# -eq "0" ]; then
echo "Usage: $(basename "$0") version [outputdir]"
exit 1
fi
# Set the working dir to the location of this script
cd "$(dirname "$0")" || exit 1
TAG="$1"
OUTPUTDIR="$2"
SRCDIR="$(pwd)/../.."
BUILTDIR="$(pwd)/build"
UPDATE_CHANNEL=""
SUFFIX="-devel"
if [[ ${TAG} == release* ]]; then
UPDATE_CHANNEL="release"
SUFFIX=""
elif [[ ${TAG} == playtest* ]]; then
UPDATE_CHANNEL="playtest"
SUFFIX="-playtest"
elif [[ ${TAG} == pkgtest* ]]; then
UPDATE_CHANNEL="pkgtest"
SUFFIX="-pkgtest"
fi
pushd "${TEMPLATE_ROOT}" > /dev/null || exit 1
if [ ! -d "${OUTPUTDIR}" ]; then
echo "Output directory '${OUTPUTDIR}' does not exist.";
exit 1
fi
echo "Building core files"
pushd "${SRCDIR}" > /dev/null || exit 1
make linux-dependencies
make core SDK="-sdk:4.5"
make version VERSION="${TAG}"
make install-engine prefix="usr" DESTDIR="${BUILTDIR}/"
make install-common-mod-files prefix="usr" DESTDIR="${BUILTDIR}/"
popd > /dev/null || exit 1
# Add native libraries
echo "Downloading dependencies"
if command -v curl >/dev/null 2>&1; then
curl -s -L -O https://github.com/OpenRA/AppImageSupport/releases/download/${DEPENDENCIES_TAG}/libs.tar.bz2 || exit 3
curl -s -L -O https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage || exit 3
else
wget -cq https://github.com/OpenRA/AppImageSupport/releases/download/${DEPENDENCIES_TAG}/libs.tar.bz2 || exit 3
wget -cq https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage || exit 3
fi
chmod a+x appimagetool-x86_64.AppImage
echo "Building AppImage"
tar xf libs.tar.bz2
install -Dm 0755 libSDL2.so "${BUILTDIR}/usr/lib/openra/"
install -Dm 0644 SDL2-CS.dll.config "${BUILTDIR}/usr/lib/openra/"
install -Dm 0755 libopenal.so "${BUILTDIR}/usr/lib/openra/"
install -Dm 0644 OpenAL-CS.dll.config "${BUILTDIR}/usr/lib/openra/"
install -Dm 0755 liblua.so "${BUILTDIR}/usr/lib/openra/"
install -Dm 0644 Eluant.dll.config "${BUILTDIR}/usr/lib/openra/"
rm libs.tar.bz2 libSDL2.so SDL2-CS.dll.config libopenal.so OpenAL-CS.dll.config liblua.so Eluant.dll.config
build_appimage() {
MOD_ID=${1}
DISPLAY_NAME=${2}
APPDIR="$(pwd)/${MOD_ID}.appdir"
APPIMAGE="OpenRA-$(echo "${DISPLAY_NAME}" | sed 's/ /-/g')${SUFFIX}-x86_64.AppImage"
cp -r "${BUILTDIR}" "${APPDIR}"
# Add mod files
pushd "${SRCDIR}" > /dev/null || exit 1
cp -r "mods/${MOD_ID}" mods/modcontent "${APPDIR}/usr/lib/openra/mods"
popd > /dev/null || exit 1
# Add launcher and icons
sed "s/{MODID}/${MOD_ID}/g" AppRun.in | sed "s/{MODNAME}/${DISPLAY_NAME}/g" > AppRun.temp
install -m 0755 AppRun.temp "${APPDIR}/AppRun"
sed "s/{MODID}/${MOD_ID}/g" openra.desktop.in | sed "s/{MODNAME}/${DISPLAY_NAME}/g" | sed "s/{TAG}/${TAG}/g" > temp.desktop
echo "StartupWMClass=openra-${MOD_ID}-${TAG}" >> temp.desktop
install -Dm 0755 temp.desktop "${APPDIR}/usr/share/applications/openra-${MOD_ID}.desktop"
install -m 0755 temp.desktop "${APPDIR}/openra-${MOD_ID}.desktop"
sed "s/{MODID}/${MOD_ID}/g" openra-mimeinfo.xml.in | sed "s/{TAG}/${TAG}/g" > temp.xml
install -Dm 0755 temp.xml "${APPDIR}/usr/share/mime/packages/openra-${MOD_ID}.xml"
if [ -f "icons/${MOD_ID}_scalable.svg" ]; then
install -Dm644 "icons/${MOD_ID}_scalable.svg" "${APPDIR}/usr/share/icons/hicolor/scalable/apps/openra-${MOD_ID}.svg"
fi
for i in 16x16 32x32 48x48 64x64 128x128 256x256 512x512 1024x1024; do
if [ -f "icons/${MOD_ID}_${i}.png" ]; then
install -Dm644 "icons/${MOD_ID}_${i}.png" "${APPDIR}/usr/share/icons/hicolor/${i}/apps/openra-${MOD_ID}.png"
install -m644 "icons/${MOD_ID}_${i}.png" "${APPDIR}/openra-${MOD_ID}.png"
fi
done
install -d "${APPDIR}/usr/bin"
sed "s/{MODID}/${MOD_ID}/g" openra.appimage.in | sed "s/{TAG}/${TAG}/g" | sed "s/{MODNAME}/${DISPLAY_NAME}/g" > openra-mod.temp
install -m 0755 openra-mod.temp "${APPDIR}/usr/bin/openra-${MOD_ID}"
sed "s/{MODID}/${MOD_ID}/g" openra-server.appimage.in > openra-mod-server.temp
install -m 0755 openra-mod-server.temp "${APPDIR}/usr/bin/openra-${MOD_ID}-server"
sed "s/{MODID}/${MOD_ID}/g" openra-utility.appimage.in > openra-mod-utility.temp
install -m 0755 openra-mod-utility.temp "${APPDIR}/usr/bin/openra-${MOD_ID}-utility"
install -m 0755 gtk-dialog.py "${APPDIR}/usr/bin/gtk-dialog.py"
# travis-ci doesn't support mounting FUSE filesystems so extract and run the contents manually
./appimagetool-x86_64.AppImage --appimage-extract
# Embed update metadata if (and only if) compiled on travis
if [ ! -z "${TRAVIS_REPO_SLUG}" ]; then
ARCH=x86_64 ./squashfs-root/AppRun --no-appstream -u "zsync|https://master.openra.net/appimagecheck?mod=${MOD_ID}&channel=${UPDATE_CHANNEL}" "${APPDIR}" "${OUTPUTDIR}/${APPIMAGE}"
zsyncmake -u "https://github.com/${TRAVIS_REPO_SLUG}/releases/download/${TAG}/${APPIMAGE}" -o "${OUTPUTDIR}/${APPIMAGE}.zsync" "${OUTPUTDIR}/${APPIMAGE}"
else
ARCH=x86_64 ./squashfs-root/AppRun --no-appstream "${APPDIR}" "${OUTPUTDIR}/${APPIMAGE}"
fi
rm -rf "${APPDIR}"
}
build_appimage "ra" "Red Alert"
build_appimage "cnc" "Tiberian Dawn"
build_appimage "d2k" "Dune 2000"
# Clean up
rm -rf openra-mod.temp openra-mod-server.temp openra-mod-utility.temp temp.desktop temp.xml AppRun.temp appimagetool-x86_64.AppImage squashfs-root "${BUILTDIR}"
|
ABrandau/OpenRA
|
packaging/linux/buildpackage.sh
|
Shell
|
gpl-3.0
| 5,778 |
#!/bin/bash
# +--------------------------------------------------------------------+
# EFA Project mass user import script
# Version 20140921
# +--------------------------------------------------------------------+
# Copyright (C) 2014~2017 http://www.efa-project.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# +--------------------------------------------------------------------+
SQLTMPFILE="userimport.sql"
SQLTMPDIR="/tmp/EFA/"
infile=""
append="0"
overwrite="0"
quiet="0"
function help(){
echo
echo "EFA Mass User Import Help"
echo
echo "userimport.sh Copyright (C) 2014 efa-project.org"
echo "Licensed GNU GPL v3. This program comes with ABSOLUTELY NO WARRANTY"
echo "This is free software, and you are welcome to redistribute it under"
echo "certain conditions. See http://www.gnu.org/licenses for more details"
echo
echo "Usage: userimport.sh -f mylist -a|-o [-q]"
echo "-a append to existing list"
echo "-q force overwrite database tables without prompting"
echo "-o overwrite existing list (admins and domain admins exempt)"
echo
echo "user list mylist is newline comma separated list with each"
echo "line in the following format:"
echo
echo '<username>,<password>,<fullname>,<type>'
echo 'type={A,D,U,R,H}'
}
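# A sample input file (illustrative values only):
# jdoe,Secret123,John Doe,U
# rroe,Passw0rd42,Richard Roe,H
# which would be imported with: ./userimport.sh -f mylist -a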
if [[ "$#" == "0" ]]; then
help
fi
if [[ `whoami` != "root" ]]; then
echo "Root access is required to execute script, exiting."
exit 1
fi
while [[ $# -gt 0 ]]
do
param="$1"
shift
case $param in
-f|--file)
infile="$1"
shift
;;
-a|--append)
append="1"
;;
-q|--quiet)
quiet="1"
;;
-o|--overwrite)
overwrite="1"
;;
*)
help
;;
esac
done
flag="0"
# parameter sanity check
if [[ $overwrite == "1" && $append == "1" ]]; then
echo "Incompatible parameter combination (-a and -o)"
flag="1"
fi
if [[ $quiet == "1" && $overwrite == "0" ]]; then
echo "Quiet flag (-q) used without overwrite (-o)"
flag="1"
fi
if [[ $infile == "" ]]; then
echo "No input file specified"
flag="1"
elif [[ ! -f $infile ]]; then
echo "File not found or not a regular file"
flag="1"
fi
[ $flag == "1" ] && exit 1
# get access to mysql
MAILWATCHSQLPWD=`grep MAILWATCHSQLPWD /etc/EFA-Config | sed 's/.*://'`
if [[ -z $MAILWATCHSQLPWD ]]; then
echo "Unable to access SQL password from /etc/EFA-Config, exiting."
exit 1
fi
# Build SQL SCript Header and prompt for overwrite if needed
mkdir -p $SQLTMPDIR
rm -f $SQLTMPDIR$SQLTMPFILE
touch $SQLTMPDIR$SQLTMPFILE
echo 'LOCK TABLES `users` WRITE;' >> $SQLTMPDIR$SQLTMPFILE
if [[ $overwrite == "1" ]]; then
if [[ $quiet == "0" ]]; then
flag="0"
echo "The table in mySQL will be overwritten with values from your file."
echo -n "Continue? (y/N):"
read CONFIRM
while [ $flag -eq 0 ]
do
if [[ $CONFIRM == "y" || $CONFIRM == "Y" ]]
then
flag="1"
elif [[ $CONFIRM == "n" || $CONFIRM == "N" || $CONFIRM == "" ]]; then
exit 1
else
echo -n "Continue? (y/N):"
read CONFIRM
fi
done
fi
echo "DELETE from \`users\` where type RLIKE '[UHR]';" >> $SQLTMPDIR$SQLTMPFILE
fi
# Lock Tables for writing and begin input
echo -n 'INSERT INTO `users` (username,password,fullname,type) VALUES ' >> $SQLTMPDIR$SQLTMPFILE
# Process each line of file
firstloop="1"
TMPIFS=$IFS
IFS=","
while read col1 col2 col3 col4
do
username=""
password=""
fullname=""
type=""
# check input length
username=$col1
if [[ $username != "" && $username =~ ^.{2,60}$ ]]; then
password=$col2
if [[ $col2 != "" && $col2 =~ ^.{4,32}$ ]]; then
password=$col2
if [[ $col3 =~ ^.{0,50}$ ]]; then
fullname=$col3
if [[ $col4 != "" && $col4 =~ ^[ADURH]$ ]]; then
type=$col4
fi
fi
fi
if [[ $firstloop != "1" ]]; then
echo -n "," >> $SQLTMPDIR$SQLTMPFILE
else
firstloop="0"
fi
echo -n "('$username',md5('$password'),'$fullname','$type')" >> $SQLTMPDIR$SQLTMPFILE
fi
done < $infile
IFS=$TMPIFS
echo ";" >> $SQLTMPDIR$SQLTMPFILE
echo "UNLOCK TABLES;" >> $SQLTMPDIR$SQLTMPFILE
# Import into MySQL
mysql -u mailwatch --password=$MAILWATCHSQLPWD mailscanner < $SQLTMPDIR$SQLTMPFILE
# Cleanup
rm -f $SQLTMPDIR$SQLTMPFILE
rmdir $SQLTMPDIR
|
TheGr8Wonder/v3
|
tools/userimport.sh
|
Shell
|
gpl-3.0
| 4,936 |
#!/bin/bash
java -server -cp "../libs/*" -Xms3g -Xmx3g -XX:MaxPermSize=512m -XX:+DisableExplicitGC org.mot.core.simulation.SimulationLoader $1 $2 $3 $4 $5 $6
|
sgrotz/myopentrader
|
MyOpenTraderBin/bin/runSimulationLoader.sh
|
Shell
|
gpl-3.0
| 161 |
#! /usr/bin/env bash
echo "'$1'"
ID=$(python server/geturl.py "$1")
if [ -f "audio/${ID}.mp3" ]; then
echo "file downloaded"
echo "[ffmpeg] Post-process file ${ID}.mp3"
else
cd audio
youtube-dl -f bestaudio --no-progress -w -o "%(id)s.%(ext)s" --no-post-overwrites --extract-audio --audio-format mp3 --audio-quality 7 -- "https://youtube.com/watch?v=${ID}" 2>&1
fi
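# Example (hypothetical URL): ./yt.sh "https://www.youtube.com/watch?v=abc123xyz99"
# server/geturl.py is expected to reduce whatever URL form is given to a bare video ID.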
|
hsheth2/ensemble
|
server/yt.sh
|
Shell
|
gpl-3.0
| 384 |
#!/bin/bash
# Nginx configuration for couchpotato
# Author: liara
# Copyright (C) 2017 Swizzin
# Licensed under GNU General Public License v3.0 GPL-3 (in short)
#
# You may copy, distribute and modify the software as long as you track
# changes/dates in source files. Any modifications to our software
# including (via compiler) GPL-licensed code must also be made available
# under the GPL along with build & install instructions.
user=$(cut -d: -f1 < /root/.master.info)
isactive=$(systemctl is-active couchpotato)
if [[ $isactive == "active" ]]; then
systemctl stop couchpotato
fi
if [[ ! -f /etc/nginx/apps/couchpotato.conf ]]; then
cat > /etc/nginx/apps/couchpotato.conf <<RAD
location /couchpotato {
include /etc/nginx/snippets/proxy.conf;
proxy_pass http://127.0.0.1:5050/couchpotato;
auth_basic "What's the password?";
auth_basic_user_file /etc/htpasswd.d/htpasswd.${user};
}
RAD
fi
sed -i "s/url_base.*/url_base = couchpotato\nhost = 127.0.0.1/g" /home/${user}/.config/couchpotato/settings.conf
if [[ $isactive == "active" ]]; then
systemctl start couchpotato
fi
|
liaralabs/swizzin
|
scripts/nginx/couchpotato.sh
|
Shell
|
gpl-3.0
| 1,100 |
#! /bin/sh
# Radio Paradise
mpc add http://stream-dc1.radioparadise.com/aac-320
# WBER
mpc add http://wber-ice-encoder.monroe.edu:80/wber-high.mp3
# Gilman St
mpc add http://stream.southofgilman.org:8000/
# BBC World
mpc add http://bbcwssc.ic.llnwd.net/stream/bbcwssc_mp1_ws-einws
# KQED
mpc add http://streams2.kqed.org:80/kqedradio
# WBFO
mpc add http://playerservices.streamtheworld.com/api/livestream-redirect/WBFOFM.mp3
# WBFO HD2
mpc add http://playerservices.streamtheworld.com/api/livestream-redirect/WBFOHD2.mp3
# WNED
mpc add http://playerservices.streamtheworld.com/api/livestream-redirect/WNEDFM.mp3
# Radio Astronomy
mpc add http://radio.astronomy.fm:8111/live
# KEXP
mpc add http://live-mp3-128.kexp.org:8000/
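# Start playback of a loaded station by playlist position, e.g. (assumption):
# mpc play 1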
|
ErnieBoxer/PiGuiPiano
|
scripts/mpc_stations.sh
|
Shell
|
gpl-3.0
| 731 |
#!/bin/bash
function generateDawg() {
type=$1
lang=$2
echo "Generate $1 for lang $2"
cp "plain-dict/plain_"$type"_"$lang".txt" bin/plain_dic.txt
cd bin
./blitzkrieg &> /dev/null
cd ..
rm bin/plain_dic.txt
rm bin/dawg_dic.txt
mv bin/dawg_dic.dat "../baggle-solver/src/inouire/baggle/dict/dawg_"$type"_"$lang".dat"
}
function generateDawgGrid5() {
type=$1
lang=$2
echo "Generate $1 5x5 for lang $2"
cp "plain-dict/plain_"$type"_"$lang"_5x5.txt" bin/plain_dic.txt
cd bin
./blitzkrieg-5x5 &> /dev/null
cd ..
rm bin/plain_dic.txt
rm bin/dawg_dic.txt
mv bin/dawg_dic.dat "../baggle-solver/src/inouire/baggle/dict/dawg_"$type"_"$lang"_5x5.dat"
}
#compile generator
cd bin
gcc -o blitzkrieg blitzkrieg-trie-attack-dawg-creator.c
gcc -o blitzkrieg-5x5 blitzkrieg-trie-attack-dawg-creator-5x5.c
cd ..
#generate dict one by one
generateDawg "dict" "fr"
generateDawg "dict" "en"
generateDawg "blacklist" "fr"
generateDawgGrid5 "dict" "fr"
|
inouire/baggle
|
dict-gen/dict-generation.sh
|
Shell
|
gpl-3.0
| 1,011 |
# toggle English/German keyboard with alt+shift
command -v setxkbmap > /dev/null 2>&1 && \
  setxkbmap -option grp:alt_shift_toggle us,de
|
vpenso/scripts
|
var/aliases/keymap.sh
|
Shell
|
gpl-3.0
| 135 |
#!/bin/sh
docker tag nodejs-d8 nodejs-d8:`date +%Y%m%d`
|
jianyingdeshitou/docker-debian8
|
nodejs-d8/tag-date.sh
|
Shell
|
gpl-3.0
| 56 |
#!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
EXE="executable"
pushd $DIR
gcc -g -Wall -o $EXE test_array_init.c
./$EXE
# rm $EXE
popd
|
ltf/lab
|
clab/makeandrun.sh
|
Shell
|
gpl-3.0
| 169 |
check_switch_c C99 -std=gnu99 "Cannot find C99 compatible compiler."
check_switch_c NOUNUSED -Wno-unused-result
add_define_make NOUNUSED "$HAVE_NOUNUSED"
check_switch_c NOUNUSED_VARIABLE -Wno-unused-variable
add_define_make NOUNUSED_VARIABLE "$HAVE_NOUNUSED_VARIABLE"
# There are still broken 64-bit Linux distros out there. :)
[ -z "$CROSS_COMPILE" ] && [ -d /usr/lib64 ] && add_library_dirs /usr/lib64
[ -z "$CROSS_COMPILE" ] && [ -d /opt/local/lib ] && add_library_dirs /opt/local/lib
if [ "$OS" = 'BSD' ]; then
DYLIB=-lc;
else
DYLIB=-ldl;
fi
add_define_make DYLIB_LIB "$DYLIB"
if [ "$HAVE_NEON" = "yes" ]; then
CFLAGS="$CFLAGS -mfpu=neon -marm"
ASFLAGS="$ASFLAGS -mfpu=neon"
fi
if [ "$HAVE_FLOATHARD" = "yes" ]; then
CFLAGS="$CFLAGS -mfloat-abi=hard"
ASFLAGS="$ASFLAGS -mfloat-abi=hard"
fi
if [ "$HAVE_FLOATSOFTFP" = "yes" ]; then
CFLAGS="$CFLAGS -mfloat-abi=softfp"
ASFLAGS="$ASFLAGS -mfloat-abi=softfp"
fi
if [ "$HAVE_NEON" = "yes" ]; then
CFLAGS="$CFLAGS -mfpu=neon -marm"
ASFLAGS="$ASFLAGS -mfpu=neon"
fi
if [ "$HAVE_FLOATHARD" = "yes" ]; then
CFLAGS="$CFLAGS -mfloat-abi=hard"
ASFLAGS="$ASFLAGS -mfloat-abi=hard"
fi
if [ "$HAVE_FLOATSOFTFP" = "yes" ]; then
CFLAGS="$CFLAGS -mfloat-abi=softfp"
ASFLAGS="$ASFLAGS -mfloat-abi=softfp"
fi
if [ "$HAVE_SSE" = "yes" ]; then
CFLAGS="$CFLAGS -msse -msse2"
fi
if [ "$HAVE_EGL" != "no" ]; then
check_pkgconf EGL egl
# some systems have EGL libs, but no pkgconfig
if [ "$HAVE_EGL" = "no" ]; then
HAVE_EGL=auto && check_lib EGL "-lEGL $EXTRA_GL_LIBS"
[ "$HAVE_EGL" = "yes" ] && EGL_LIBS=-lEGL
else
EGL_LIBS="$EGL_LIBS $EXTRA_GL_LIBS"
fi
fi
if [ "$HAVE_EXYNOS" != "no" ]; then
check_pkgconf EXYNOS libdrm_exynos
check_pkgconf DRM libdrm
fi
if [ "$LIBRETRO" ]; then
echo "Explicit libretro used, disabling dynamic libretro loading ..."
HAVE_DYNAMIC='no'
else LIBRETRO="-lretro"
fi
[ "$HAVE_DYNAMIC" = 'yes' ] || {
check_lib RETRO "$LIBRETRO" retro_init "$DYLIB"
add_define_make libretro "$LIBRETRO"
}
if [ "$MAN_DIR" ]; then
add_define_make MAN_DIR "$MAN_DIR"
else
add_define_make MAN_DIR "${PREFIX}/share/man/man1"
fi
check_lib THREADS -lpthread pthread_create
check_lib DYLIB "$DYLIB" dlopen
check_lib NETPLAY -lc socket
if [ "$HAVE_NETPLAY" = 'yes' ]; then
HAVE_GETADDRINFO=auto
check_lib GETADDRINFO -lc getaddrinfo
if [ "$HAVE_GETADDRINFO" = 'yes' ]; then
HAVE_SOCKET_LEGACY='no'
else
HAVE_SOCKET_LEGACY='yes'
fi
HAVE_NETWORK_CMD='yes'
else
HAVE_NETWORK_CMD='no'
fi
check_lib GETOPT_LONG -lc getopt_long
if [ "$HAVE_DYLIB" = 'no' ] && [ "$HAVE_DYNAMIC" = 'yes' ]; then
echo "Dynamic loading of libretro is enabled, but your platform does not appear to have dlopen(), use --disable-dynamic or --with-libretro=\"-lretro\"".
exit 1
fi
check_pkgconf ALSA alsa
check_header OSS sys/soundcard.h
check_header OSS_BSD soundcard.h
check_lib AL -lopenal alcOpenDevice
check_pkgconf JACK jack 0.120.1
check_pkgconf PULSE libpulse
check_pkgconf SDL sdl 1.2.10
check_pkgconf SDL2 sdl2 2.0.0
if [ "$HAVE_SDL2" = 'yes' ]; then
if [ "$HAVE_SDL2" = 'yes' ] && [ "$HAVE_SDL" = 'yes' ]; then
echo "SDL drivers will be replaced by SDL2 ones."
HAVE_SDL=no
elif [ "$HAVE_SDL2" = 'no' ]; then
echo "SDL2 not found, skipping."
HAVE_SDL2=no
fi
fi
if [ "$HAVE_OPENGL" != 'no' ] && [ "$HAVE_GLES" != 'yes' ]; then
# On some distros, -lCg doesn't link against -lstdc++ it seems ...
check_lib CG -lCg cgCreateContext
else
echo "Ignoring Cg. Desktop OpenGL is not enabled."
HAVE_CG='no'
fi
check_pkgconf ZLIB zlib
if [ "$HAVE_THREADS" != 'no' ]; then
if [ "$HAVE_FFMPEG" != 'no' ]; then
check_pkgconf AVCODEC libavcodec 54
check_pkgconf AVFORMAT libavformat 54
check_pkgconf AVUTIL libavutil 51
check_pkgconf SWSCALE libswscale 2.1
check_header AV_CHANNEL_LAYOUT libavutil/channel_layout.h
( [ "$HAVE_FFMPEG" = 'auto' ] && ( [ "$HAVE_AVCODEC" = 'no' ] || [ "$HAVE_AVFORMAT" = 'no' ] || [ "$HAVE_AVUTIL" = 'no' ] || [ "$HAVE_SWSCALE" = 'no' ] ) && HAVE_FFMPEG='no' ) || HAVE_FFMPEG='yes'
fi
else
echo "Not building with threading support. Will skip FFmpeg."
HAVE_FFMPEG='no'
fi
check_lib DYNAMIC "$DYLIB" dlopen
if [ "$HAVE_KMS" != "no" ]; then
check_pkgconf GBM gbm 9.0
check_pkgconf DRM libdrm
if [ "$HAVE_GBM" = "yes" ] && [ "$HAVE_DRM" = "yes" ] && [ "$HAVE_EGL" = "yes" ]; then
HAVE_KMS=yes
elif [ "$HAVE_KMS" = "yes" ]; then
echo "Cannot find libgbm, libdrm and EGL libraries required for KMS. Compile without --enable-kms."
exit 1
else
HAVE_KMS=no
fi
fi
check_pkgconf LIBXML2 libxml-2.0
if [ "$HAVE_EGL" = "yes" ]; then
if [ "$HAVE_GLES" != "no" ]; then
if [ "$GLES_LIBS" ] || [ "$GLES_CFLAGS" ]; then
echo "Using custom OpenGLES CFLAGS ($GLES_CFLAGS) and LDFLAGS ($GLES_LIBS)."
add_define_make GLES_LIBS "$GLES_LIBS"
add_define_make GLES_CFLAGS "$GLES_CFLAGS"
else
HAVE_GLES=auto check_pkgconf GLES glesv2
[ "$HAVE_GLES" = "no" ] && HAVE_GLES=auto check_lib GLES "-lGLESv2 $EXTRA_GL_LIBS" && add_define_make GLES_LIBS "-lGLESv2 $EXTRA_GL_LIBS"
fi
fi
else
HAVE_GLES=no
fi
if [ "$HAVE_GLES" = "yes" ]; then
[ $HAVE_FBO != "no" ] && HAVE_FBO=yes
else
check_lib FBO -lGL glFramebufferTexture2D
fi
check_pkgconf FREETYPE freetype2
check_pkgconf X11 x11
[ "$HAVE_X11" = "no" ] && HAVE_XEXT=no && HAVE_XF86VM=no && HAVE_XINERAMA=no
check_pkgconf WAYLAND wayland-egl
check_pkgconf XKBCOMMON xkbcommon 0.3.2
check_pkgconf XEXT xext
check_pkgconf XF86VM xxf86vm
check_pkgconf XINERAMA xinerama
if [ "$HAVE_X11" = 'yes' ] && [ "$HAVE_XEXT" = 'yes' ] && [ "$HAVE_XF86VM" = 'yes' ]; then
check_pkgconf XVIDEO xv
else
echo "X11, Xext or xf86vm not present. Skipping X11 code paths."
HAVE_X11='no'
HAVE_XVIDEO='no'
fi
if [ "$HAVE_UDEV" != "no" ]; then
check_pkgconf UDEV libudev
if [ "$HAVE_UDEV" = "no" ]; then
HAVE_UDEV=auto && check_lib UDEV "-ludev"
[ "$HAVE_UDEV" = "yes" ] && UDEV_LIBS=-ludev
fi
fi
check_lib STRL -lc strlcpy
check_lib STRCASESTR -lc strcasestr
check_lib MMAP -lc mmap
check_pkgconf PYTHON python3
check_macro NEON __ARM_NEON__
add_define_make OS "$OS"
# Creates config.mk and config.h.
add_define_make GLOBAL_CONFIG_DIR "$GLOBAL_CONFIG_DIR"
VARS="RGUI ALSA OSS OSS_BSD AL JACK PULSE SDL SDL2 OPENGL GLES GLES3 EGL KMS EXYNOS GBM DRM DYLIB GETOPT_LONG THREADS CG LIBXML2 ZLIB DYNAMIC FFMPEG AVCODEC AVFORMAT AVUTIL SWSCALE FREETYPE XKBCOMMON XVIDEO X11 XEXT XF86VM XINERAMA WAYLAND MALI_FBDEV NETPLAY NETWORK_CMD COMMAND SOCKET_LEGACY FBO STRL STRCASESTR MMAP PYTHON FFMPEG_ALLOC_CONTEXT3 FFMPEG_AVCODEC_OPEN2 FFMPEG_AVIO_OPEN FFMPEG_AVFORMAT_WRITE_HEADER FFMPEG_AVFORMAT_NEW_STREAM FFMPEG_AVCODEC_ENCODE_AUDIO2 FFMPEG_AVCODEC_ENCODE_VIDEO2 BSV_MOVIE NEON FLOATHARD FLOATSOFTFP UDEV AV_CHANNEL_LAYOUT"
create_config_make config.mk $VARS
create_config_header config.h $VARS
|
tobiasjakobi/RetroArch
|
qb/config.libs.sh
|
Shell
|
gpl-3.0
| 7,037 |
#!/bin/sh
#
# Reindent CWD recursively using clang-format (assumed to be in your PATH),
# and per-component or per-directory .clang-format style specifications.
#
CPUS=`sysctl -n hw.logicalcpu`
CLANGFORMAT=`xcrun -find clang-format`
if [ ! -x "${CLANGFORMAT}" ]; then
echo "Could not find clang-format" 1>&2
exit 1
fi
echo "Using ${CLANGFORMAT} to reindent, using concurrency of ${CPUS}"
find -x . \! \( \( -name BUILD -o -name EXTERNAL_HEADERS -o -name libMicro -o -name zlib -o -name .svn -o -name .git -o -name cscope.\* -o -name \*~ \) -prune \) -type f \( -name \*.c -o -name \*.cpp \) -print0 | \
xargs -0 -P "${CPUS}" -n 10 "${CLANGFORMAT}" -style=file -i
ret=$?
if [ $ret -ne 0 ]; then
echo "reindent failed: $ret" 1>&2
exit 1
fi
exit 0
|
p01arst0rm/decorum-linux
|
_resources/kernels/xnu-x86/tools/reindent.sh
|
Shell
|
gpl-3.0
| 772 |
#!/bin/bash -xe
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
. ${DIR}/functions.sh
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root" 1>&2
exit 1
fi
if [[ "$#" -ne 5 ]]; then
echo "Usage: $0 board arch image release distro"
exit 1
fi
SYNCLOUD_BOARD=$1
ARCH=$2
SYNCLOUD_IMAGE=$3
RELEASE=$4
DISTRO=$5
SRC_FILES=files/${SYNCLOUD_BOARD}
SRC_ROOTFS=rootfs_${SYNCLOUD_BOARD}
DST_ROOTFS=dst_${SYNCLOUD_BOARD}/root
ROOTFS_FILE=rootfs-${DISTRO}-${ARCH}.tar.gz
echo "==== ${SYNCLOUD_BOARD}, ${ARCH} ===="
apt update
apt install -y wget parted kpartx
if [[ ! -f ${ROOTFS_FILE} ]]; then
wget https://github.com/syncloud/rootfs/releases/download/${RELEASE}/${ROOTFS_FILE} --progress dot:giga
else
echo "$ROOTFS_FILE is here"
fi
mkdir ${SRC_ROOTFS}
tar xzf ${ROOTFS_FILE} -C${SRC_ROOTFS}
cat ${SRC_ROOTFS}/etc/hosts
rm -rf ${ROOTFS_FILE}
LOOP=$(attach_image ${SYNCLOUD_IMAGE})
sync
partprobe /dev/$LOOP
DEVICE_PART_1=/dev/mapper/${LOOP}p1
DEVICE_PART_2=/dev/mapper/${LOOP}p2
lsblk ${DEVICE_PART_2} -o FSTYPE
fsck -fy ${DEVICE_PART_2}
UUID_FILE=${SYNCLOUD_BOARD}/root/uuid
if [[ -f "${UUID_FILE}" ]]; then
UUID=$(<${UUID_FILE})
change_uuid ${DEVICE_PART_2} ${UUID}
fi
mount ${DEVICE_PART_2} ${DST_ROOTFS}
ls -la ${SRC_ROOTFS}
ls -la ${SRC_ROOTFS}/etc
cat ${SRC_ROOTFS}/etc/hosts
ls -la ${DST_ROOTFS}
ls -la ${SYNCLOUD_BOARD}/root/
ls -la ${SYNCLOUD_BOARD}/root/etc
echo "copying rootfs"
cp -rp ${SRC_ROOTFS}/* ${DST_ROOTFS}/
cat ${DST_ROOTFS}/etc/hosts
rm -rf ${SRC_ROOTFS}
ls -la ${DST_ROOTFS}/lib
ls -la ${SYNCLOUD_BOARD}/root/lib
mv ${SYNCLOUD_BOARD}/root/lib/* ${DST_ROOTFS}/lib/
rm -rf ${SYNCLOUD_BOARD}/root/lib
cp -rp ${SYNCLOUD_BOARD}/root/* ${DST_ROOTFS}/
echo "copying files"
cp -rp ${SRC_FILES}/* ${DST_ROOTFS}/
if [[ ${ARCH} == "amd64" ]]; then
cat ${DST_ROOTFS}/etc/fstab
DEVICE_PART_1_UUID=$(blkid ${DEVICE_PART_1} -s UUID -o value)
sed -i 's#/dev/sda1#UUID='${DEVICE_PART_1_UUID}'#g' ${DST_ROOTFS}/etc/fstab
DEVICE_PART_2_UUID=$(blkid ${DEVICE_PART_2} -s UUID -o value)
sed -i 's#/dev/sda2#UUID='${DEVICE_PART_2_UUID}'#g' ${DST_ROOTFS}/etc/fstab
cat ${DST_ROOTFS}/etc/fstab
fi
echo "setting hostname"
echo syncloud > ${DST_ROOTFS}/etc/hostname
cat ${DST_ROOTFS}/etc/hosts
echo "127.0.0.1 syncloud" >> ${DST_ROOTFS}/etc/hosts
echo "::1 syncloud" >> ${DST_ROOTFS}/etc/hosts
grep localhost ${DST_ROOTFS}/etc/hosts
sync
umount ${DEVICE_PART_2}
kpartx -d ${SYNCLOUD_IMAGE}
dmsetup remove -f /dev/mapper/${LOOP}p1 || true
dmsetup remove -f /dev/mapper/${LOOP}p2 || true
losetup -d /dev/${LOOP} || true
losetup | grep img || true
ls -la ${DST_ROOTFS}
|
syncloud/image
|
tools/rootfs.sh
|
Shell
|
gpl-3.0
| 2,641 |
#! /bin/sh
# Java #
#export JAVA_HOME=/path/to/jdk
#export CLASSPATH=
#export PATH=$JAVA_HOME/bin:$PATH
## Apache Ant ##
#export ANT_HOME=/path/to/ant
#export ANT_OPTS=
#export PATH=$ANT_HOME/bin:$PATH
## Apache Maven ##
#export M2_HOME=/path/to/maven
#export M2=$M2_HOME/bin
#export MAVEN_OPTS=
#export M2_REPO=
#export PATH=$M2:$PATH
# Oracle #
#export ORACLE_HOME=/path/to/oracle
#export ORACLE_SID=
#export NLS_LANG=
|
thiagobaptista/nix_stuff
|
exports/exports.sh
|
Shell
|
gpl-3.0
| 431 |
#! /bin/bash
if [[ ! -e "mytar" && ! -x "mytar" ]]
then
echo "El ejecutable mytar no esta en este directorio."
exit 1
fi
if [[ -d "tmp" ]]
then
echo "Exixte el dir tmp, se borrara"
rm -r tmp
fi
mkdir tmp && cd tmp
echo "Hello world!" > file1.txt
head -n10 /etc/passwd > file2.txt
head -c1024 /dev/urandom > file3.txt
.././mytar -cf filetar.mtar file1.txt file2.txt file3.txt
mkdir out && cp filetar.mtar out
cd out && ../.././mytar -xf filetar.mtar
if diff file1.txt ../file1.txt && diff file2.txt ../file2.txt && diff file3.txt ../file3.txt &> /dev/null
then
cd ../..
echo "Correct"
exit 0
else
cd ../..
echo "Los archivos no son iguales"
exit 1
fi
|
albertoMarquez/SistemasOperativos-SO
|
Practica1/test.sh
|
Shell
|
gpl-3.0
| 697 |
###
# resolve the script dir portably on both OS X (greadlink) and Linux (readlink)
if
[[ $OSTYPE == darwin* ]]
then
readlink=$(which greadlink)
scriptdir="$(dirname $($readlink -f $0))"
else
scriptdir="$(dirname $(readlink -f $0))"
fi
#
#user defined variables that could be changed:
workingdir=./
script=$scriptdir/11-catSE.sh
###
function findSamples () {
find reads/ -mindepth 1 -maxdepth 1 -type d -exec basename {} \;| tr ' ' '\n'
}
cd $workingdir
findSamples | parallel bash $script {}
#To run:
#bash ~/path_to/00-runner.sh
|
pedrocrisp/NGS-pipelines
|
RNAseqPipe3/11-runner.sh
|
Shell
|
gpl-3.0
| 496 |