Dataset columns:
  code        string  (2 – 1.05M characters)
  repo_name   string  (5 – 110 characters)
  path        string  (3 – 922 characters)
  language    string  (1 distinct value)
  license     string  (15 distinct values)
  size        int64   (2 – 1.05M)
#!/bin/bash

sudo true

sudo apt-get install --yes \
    apt-transport-https \
    ca-certificates \
    curl

curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -

sudo add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable"

sudo apt-get update
sudo apt-get install --yes docker-ce

sudo docker run hello-world
edlund/pilcrow
util/ubuntu/xenial/Docker.sh
Shell
gpl-3.0
414
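A common follow-up after such an install, sketched below, is letting the current user run docker without sudo; this post-install step is an assumption on my part and is not part of the script above.

# Optional post-install step (assumption, not in the script above):
sudo usermod -aG docker "$USER"
# log out and back in, then verify without sudo:
docker run hello-world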
#!/usr/bin/env bash
set -e

echo "Installing dependencies"
sudo apt-get install -q -y python-software-properties
sudo apt-add-repository -y ppa:ansible/ansible
sudo apt-get update -q
sudo apt-get install -q -y ansible

echo "Installing php dependencies"
composer install

echo "Downloading and unzipping odm_automation"
wget https://github.com/OpenDevelopmentMekong/odm-automation/archive/master.zip -O /tmp/odm_automation.zip
unzip /tmp/odm_automation.zip -d /tmp/

echo "Decrypting private key and adding it to the ssh agent"
openssl aes-256-cbc -K $encrypted_f5c2fe88ed3f_key -iv $encrypted_f5c2fe88ed3f_iv -in odm_tech_rsa.enc -out ~/.ssh/id_rsa -d
chmod 600 ~/.ssh/id_rsa
eval `ssh-agent -s`
ssh-add ~/.ssh/id_rsa
OpenDevelopmentMekong/wpckan
_ci/install.sh
Shell
gpl-3.0
716
#!/bin/sh

PROGRAM=miniweb
DEST=/usr/local/bin

install --compare $PROGRAM $DEST
ls -l $DEST/$PROGRAM
berndhs/miniweb
systeminst.sh
Shell
gpl-3.0
104
#!/bin/sh

[ -n "$tests_included" ] && return
tests_included=true

test_readable_file() {
    [ ! -d "$1" ] && [ -r "$1" ]
}

test_writable_dir() {
    [ -d "$1" ] && [ -w "$1" ]
}

test_installed() {
    which "$1" >/dev/null
}

test_number() {
    # checks if value is a number. prints number to stdout if
    # test succeeds, otherwise does nothing
    # $1: value to check
    result=$( echo $1 | awk '(int($1) == $1) { print $1; }' )
    [ -n "$result" ]
}
jk977/twitch-plays
shell/tests.sh
Shell
gpl-3.0
464
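A hypothetical usage sketch (file names are made up): the library is meant to be sourced from another POSIX sh script, with each predicate used as a condition.

#!/bin/sh
. ./shell/tests.sh

if test_installed awk && test_readable_file ./config.txt; then
    echo "prerequisites present"
fi

# test_number prints the value when it is an integer, so capture it:
if n=$(test_number "$1"); then
    echo "got number: $n"
fi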
#!/bin/bash

name=$1
if [ -z "$1" ]; then
    echo "usage $0 [container-name]"
    echo "example: $0 php"
    exit
fi

container_id=`docker ps -aql -f "name=$name"`
if [ ! -z "$container_id" ]; then
    docker stop $container_id
    docker rm $container_id
fi
colt-xie/dockers
delete.sh
Shell
gpl-3.0
258
sh tulip_run_test.sh layout_ogdf_planarization_grid grid_approximation.tlp
jukkes/TulipProject
unit_test/gui/run_layout_ogdf_planarization_grid_test.sh
Shell
gpl-3.0
75
#!/bin/bash

function test_dEploid {
    echo -n " dEploid $@ "
    for i in `seq 1 1`; do
        echo -n "."
        # Test using dEploid self-checks
        ./dEploid_dbg $@ -seed $i > /dev/null
        if [ $? -ne 0 ]; then
            echo ""
            echo "Executing \"./dEploid_dbg $@ -seed $i\" failed."
            echo "Debug Call: make -mj2 dEploid_dbg && ./dEploid_dbg $@ -seed $i 2>&1 | less"
            exit 1
        fi
        echo -n "."
        # Test for memory leaks
        valgrind --error-exitcode=1 --leak-check=full -q --gen-suppressions=yes ./dEploid $@ -seed $i > /dev/null
        if [ $? -ne 0 ]; then
            echo ""
            echo "Valgrind check of \"./dEploid $@ -seed $i\" failed."
            exit 1
        fi
    done
    echo " done."
}

function test_noRepeat {
    echo -n " dEploid $@ "
    # Test using dEploid self-checks
    ./dEploid_dbg $@ > /dev/null
    if [ $? -ne 0 ]; then
        echo ""
        echo "Executing \"./dEploid_dbg $@\" failed."
        echo "Debug Call: make -mj2 dEploid_dbg && ./dEploid_dbg $@ 2>&1 | less"
        exit 1
    fi
    # Test for memory leaks
    valgrind --error-exitcode=1 --leak-check=full -q --gen-suppressions=yes ./dEploid $@ > /dev/null
    if [ $? -ne 0 ]; then
        echo ""
        echo "Valgrind check of \"./dEploid $@\" failed."
        exit 1
    fi
    echo " done."
}

sameFlags="-exclude data/testData/labStrains.test.exclude.txt -plaf data/testData/labStrains.test.PLAF.txt"

echo "Testing examples"
test_noRepeat
test_noRepeat -help
test_noRepeat -h
test_noRepeat -v
test_noRepeat -version

# The vcf.gz test wouldn't work, because zlib is version 1.2.3.4 on travis and circle and is difficult to update.
#test_dEploid ${sameFlags} -vcf data/testData/PG0390-C.test.vcf.gz -noPanel -o tmp1 || exit 1
test_dEploid ${sameFlags} -vcf data/testData/PG0390-C.test.vcf -noPanel -o tmp1 -vcfOut || exit 1

# The following tests take a long time; turned off on travis for now ...
#test_dEploid ${sameFlags} -vcf data/testData/PG0390-C.test.vcf -panel data/testData/labStrains.test.panel.txt -o tmp2 -painting tmp1.hap || exit 1
#test_dEploid ${sameFlags} -vcf data/testData/PG0390-C.test.vcf -panel data/testData/labStrains.test.panel.txt -o tmp1 || exit 1
#test_dEploid ${sameFlags} -ref data/testData/PG0390-C.test.ref -alt data/testData/PG0390-C.test.alt -noPanel -o tmp1 || exit 1
#test_dEploid ${sameFlags} -ref data/testData/PG0390-C.test.ref -alt data/testData/PG0390-C.test.alt -panel data/testData/labStrains.test.panel.txt -o tmp1 || exit 1
echo ""
mcveanlab/DEploid
tests/test_binary.sh
Shell
gpl-3.0
2,445
#! /usr/bin/lua
-- This Lua script executes a system call (curl, piped to jshon), which
-- fetches a JSON status stream from the Hackerspace Bremen website.
--
os.execute "curl -s https://hackerspacehb.appspot.com/v2/status | jshon"
--
-- EOF
arktisvogel/wooden-head-hsb-notifier
src/wooden-head-hsb-notifier.sh
Shell
gpl-3.0
248
#!/bin/bash

echo "64hello78";
./dozstring -n -xA -eB -p'&' -c"64hello78";
echo "------------------------------";
echo "73hello 54what 1492 stop it who 11 how 45* hello 15yes yes15 what.now";
./dozstring -n -xA -eB -p'&' -c"73hello 54what 1492 stop it who 11 how 45* hello 15yes yes15 what.now";
echo "------------------------------";
echo "73hello 54what 1492 stop it who 11 how 45* hello 15yes yes15 what.now";
./dozstring -sn -xA -eB -p'&' -c"73hello 54what 1492 stop it who 11 how 45* hello 15yes yes15 what.now";
dgoodmaniii/dozenal-droid
DozcalDroid/jni/dozstring/strtest.sh
Shell
gpl-3.0
520
#!/bin/sh

baseDir=/opt/rpmrepo

cd ${baseDir}/rpmbuild
#clear && rpmbuild -ba SPECS/tomdavidson-repo.spec --sign

echo 'Searching for rpms...'
for arch in i386 x86_64 noarch; do
    for rpm in `ls ./RPMS/${arch}/*.rpm 2> /dev/null`; do
        echo "  Found ${rpm}"
        mv ${rpm} ${baseDir}/repo/Centos/6/${arch}/
    done
done

for rpm in `ls ./SRPMS/*.src.rpm 2> /dev/null`; do
    echo "  Found ${rpm}"
    mv ${rpm} ${baseDir}/repo/Centos/6/SRPMS/
done
Tom-Davidson/rpmrepo
rpmbuild/build.sh
Shell
gpl-3.0
435
#!/bin/bash
# NO PRODUCTION SAFE WARRANTY!!
# Uses GET (the lwp-request alias shipped with libwww-perl) to retrieve all
# metadata of an EC2 instance.

digger () {
    PADDRESS=$1
    for ENTRY in `GET $PADDRESS`; do
        LEN=$((${#ENTRY}-1))
        LC=${ENTRY:$LEN:1}
        if [ "$LC" == "/" ]; then
            # Container
            NEWPADDRESS=$PADDRESS$ENTRY
            digger $NEWPADDRESS
        else
            # Leaf
            T=`echo $ENTRY | grep "0="`
            if [ ! -z "$T" ]; then
                ENTRY="0/openssh-key"
            fi
            LEAF="$PADDRESS$ENTRY"
            CNAME=`echo $LEAF | sed -e 's/^.\{39\}//'`
            echo "$CNAME: `GET $LEAF`"
        fi
    done
    echo
    PADDRESS=`echo $PADDRESS | sed -e 's/\/[^/]*\/$/\//g'`
}

digger http://169.254.169.254/latest/meta-data/
iceflow/aws-demo
tools/get-metadata.sh
Shell
gpl-3.0
908
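For systems without libwww-perl, a hypothetical curl-based variant of the same recursive walk is sketched below; the function names and structure are mine, not the repo's.

# Hypothetical variant, assuming only curl is available:
meta() { curl -s "http://169.254.169.254/latest/meta-data/$1"; }

walk() {
    local prefix=$1 entry
    for entry in $(meta "$prefix"); do
        case $entry in
            */)  walk "$prefix$entry" ;;   # container: recurse
            0=*) echo "${prefix}0/openssh-key: $(meta "${prefix}0/openssh-key")" ;;
            *)   echo "$prefix$entry: $(meta "$prefix$entry")" ;;
        esac
    done
}

walk ""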
#!/usr/bin/env bash
set -e

read -d '' getRatioBuckets <<-'EOF' || true
def requestClassificationCount(prop):
    .requestedUrls
    | map(select(.classification and (.classification | (prop == true))))
    | length;

def disconnectUrls:
    .requestedUrls
    | map(select(.blocks and .blocks.disconnect and (.blocks.disconnect | (length > 0))));

def disconnectUrlEntries:
    [ .[].blocks.disconnect[] ];

def requestDisconnectCount(prop):
    map(prop) | unique | length;

(.origin and .origin.classification and .origin.classification.isFailed == false) as $isNonFailedDomain
| if $isNonFailedDomain then
    . as $root
    | disconnectUrls as $disconnectUrls
    | ($disconnectUrls | disconnectUrlEntries) as $disconnectUrlEntries
    | {
        requestCount: (.requestedUrls | length),
        counts: {
            # TODO: avoid having to explicitly list these classification properties?
            isSameDomain: requestClassificationCount(.isSameDomain),
            isSubdomain: requestClassificationCount(.isSubdomain),
            isSuperdomain: requestClassificationCount(.isSuperdomain),
            isSamePrimaryDomain: requestClassificationCount(.isSamePrimaryDomain),
            isInternalDomain: requestClassificationCount(.isInternalDomain),
            isExternalDomain: requestClassificationCount(.isExternalDomain),
            isDisconnectMatch: requestClassificationCount(.isDisconnectMatch),
            isNotDisconnectMatch: requestClassificationCount(.isNotDisconnectMatch),
            isSecure: requestClassificationCount(.isSecure),
            isInsecure: requestClassificationCount(.isInsecure)
        },
        uniqueCounts: {
            disonnectDomains: ($disconnectUrlEntries | requestDisconnectCount(.domain)),
            disonnectOrganizations: ($disconnectUrlEntries | requestDisconnectCount(.organizations)),
            disonnectCategories: ($disconnectUrlEntries | requestDisconnectCount(.categories))
        }
    }
else
    {
        requestCount: 0
    }
end
| .isNonFailedDomain = $isNonFailedDomain
EOF

cat | jq "$getRatioBuckets"
joelpurra/har-dulcify
src/questions/ratio-buckets.sh
Shell
gpl-3.0
1,946
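A hypothetical invocation sketch: the filter reads one classified-origin JSON object on stdin. The input shape below is inferred from the filter itself, not from repo documentation.

echo '{
  "origin": { "classification": { "isFailed": false } },
  "requestedUrls": [
    { "classification": { "isSecure": true, "isSameDomain": true } }
  ]
}' | ./src/questions/ratio-buckets.sh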
#!/bin/bash
# T-Pot Container Data Cleaner & Log Rotator

# Set colors
myRED=""
myGREEN=""
myWHITE=""

# Set pigz
myPIGZ=$(which pigz)

# Set persistence
myPERSISTENCE=$1

# Let's create a function to check if folder is empty
fuEMPTY () {
  local myFOLDER=$1

  echo $(ls $myFOLDER | wc -l)
}

# Let's create a function to rotate and compress logs
fuLOGROTATE () {
  local mySTATUS="/opt/tpot/etc/logrotate/status"
  local myCONF="/opt/tpot/etc/logrotate/logrotate.conf"
  local myADBHONEYTGZ="/data/adbhoney/downloads.tgz"
  local myADBHONEYDL="/data/adbhoney/downloads/"
  local myCOWRIETTYLOGS="/data/cowrie/log/tty/"
  local myCOWRIETTYTGZ="/data/cowrie/log/ttylogs.tgz"
  local myCOWRIEDL="/data/cowrie/downloads/"
  local myCOWRIEDLTGZ="/data/cowrie/downloads.tgz"
  local myDIONAEABI="/data/dionaea/bistreams/"
  local myDIONAEABITGZ="/data/dionaea/bistreams.tgz"
  local myDIONAEABIN="/data/dionaea/binaries/"
  local myDIONAEABINTGZ="/data/dionaea/binaries.tgz"
  local myHONEYTRAPATTACKS="/data/honeytrap/attacks/"
  local myHONEYTRAPATTACKSTGZ="/data/honeytrap/attacks.tgz"
  local myHONEYTRAPDL="/data/honeytrap/downloads/"
  local myHONEYTRAPDLTGZ="/data/honeytrap/downloads.tgz"
  local myTANNERF="/data/tanner/files/"
  local myTANNERFTGZ="/data/tanner/files.tgz"

  # Ensure correct permissions and ownerships for logrotate to run without issues
  chmod 770 /data/ -R
  chown tpot:tpot /data -R
  chmod 644 /data/nginx/conf -R
  chmod 644 /data/nginx/cert -R

  # Run logrotate with force (-f) first, so the status file can be written and race conditions (with tar) be avoided
  logrotate -f -s $mySTATUS $myCONF

  # Compressing some folders first and rotate them later
  if [ "$(fuEMPTY $myADBHONEYDL)" != "0" ]; then tar -I $myPIGZ -cvf $myADBHONEYTGZ $myADBHONEYDL; fi
  if [ "$(fuEMPTY $myCOWRIETTYLOGS)" != "0" ]; then tar -I $myPIGZ -cvf $myCOWRIETTYTGZ $myCOWRIETTYLOGS; fi
  if [ "$(fuEMPTY $myCOWRIEDL)" != "0" ]; then tar -I $myPIGZ -cvf $myCOWRIEDLTGZ $myCOWRIEDL; fi
  if [ "$(fuEMPTY $myDIONAEABI)" != "0" ]; then tar -I $myPIGZ -cvf $myDIONAEABITGZ $myDIONAEABI; fi
  if [ "$(fuEMPTY $myDIONAEABIN)" != "0" ]; then tar -I $myPIGZ -cvf $myDIONAEABINTGZ $myDIONAEABIN; fi
  if [ "$(fuEMPTY $myHONEYTRAPATTACKS)" != "0" ]; then tar -I $myPIGZ -cvf $myHONEYTRAPATTACKSTGZ $myHONEYTRAPATTACKS; fi
  if [ "$(fuEMPTY $myHONEYTRAPDL)" != "0" ]; then tar -I $myPIGZ -cvf $myHONEYTRAPDLTGZ $myHONEYTRAPDL; fi
  if [ "$(fuEMPTY $myTANNERF)" != "0" ]; then tar -I $myPIGZ -cvf $myTANNERFTGZ $myTANNERF; fi

  # Ensure correct permissions and ownership for previously created archives
  chmod 770 $myADBHONEYTGZ $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ $myTANNERFTGZ
  chown tpot:tpot $myADBHONEYTGZ $myCOWRIETTYTGZ $myCOWRIEDLTGZ $myDIONAEABITGZ $myDIONAEABINTGZ $myHONEYTRAPATTACKSTGZ $myHONEYTRAPDLTGZ $myTANNERFTGZ

  # Need to remove subfolders since too many files cause rm to exit with errors
  rm -rf $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF

  # Recreate subfolders with correct permissions and ownership
  mkdir -p $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF
  chmod 770 $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF
  chown tpot:tpot $myADBHONEYDL $myCOWRIETTYLOGS $myCOWRIEDL $myDIONAEABI $myDIONAEABIN $myHONEYTRAPATTACKS $myHONEYTRAPDL $myTANNERF

  # Run logrotate again to account for previously created archives - DO NOT FORCE HERE!
  logrotate -s $mySTATUS $myCONF
}

# Let's create a function to clean up and prepare adbhoney data
fuADBHONEY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/adbhoney/*; fi
  mkdir -p /data/adbhoney/log/ /data/adbhoney/downloads/
  chmod 770 /data/adbhoney/ -R
  chown tpot:tpot /data/adbhoney/ -R
}

# Let's create a function to clean up and prepare ciscoasa data
fuCISCOASA () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ciscoasa/*; fi
  mkdir -p /data/ciscoasa/log
  chmod 770 /data/ciscoasa -R
  chown tpot:tpot /data/ciscoasa -R
}

# Let's create a function to clean up and prepare citrixhoneypot data
fuCITRIXHONEYPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/citrixhoneypot/*; fi
  mkdir -p /data/citrixhoneypot/logs/
  chmod 770 /data/citrixhoneypot/ -R
  chown tpot:tpot /data/citrixhoneypot/ -R
}

# Let's create a function to clean up and prepare conpot data
fuCONPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/conpot/*; fi
  mkdir -p /data/conpot/log
  chmod 770 /data/conpot -R
  chown tpot:tpot /data/conpot -R
}

# Let's create a function to clean up and prepare cowrie data
fuCOWRIE () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/cowrie/*; fi
  mkdir -p /data/cowrie/log/tty/ /data/cowrie/downloads/ /data/cowrie/keys/ /data/cowrie/misc/
  chmod 770 /data/cowrie -R
  chown tpot:tpot /data/cowrie -R
}

# Let's create a function to clean up and prepare dicompot data
fuDICOMPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dicompot/log; fi
  mkdir -p /data/dicompot/log
  mkdir -p /data/dicompot/images
  chmod 770 /data/dicompot -R
  chown tpot:tpot /data/dicompot -R
}

# Let's create a function to clean up and prepare dionaea data
fuDIONAEA () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/dionaea/*; fi
  mkdir -p /data/dionaea/log /data/dionaea/bistreams /data/dionaea/binaries /data/dionaea/rtp /data/dionaea/roots/ftp /data/dionaea/roots/tftp /data/dionaea/roots/www /data/dionaea/roots/upnp
  chmod 770 /data/dionaea -R
  chown tpot:tpot /data/dionaea -R
}

# Let's create a function to clean up and prepare elasticpot data
fuELASTICPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elasticpot/*; fi
  mkdir -p /data/elasticpot/log
  chmod 770 /data/elasticpot -R
  chown tpot:tpot /data/elasticpot -R
}

# Let's create a function to clean up and prepare elk data
fuELK () {
  # ELK data will be kept for <= 90 days, check /etc/crontab for curator modification
  # ELK daemon log files will be removed
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/elk/log/*; fi
  mkdir -p /data/elk
  chmod 770 /data/elk -R
  chown tpot:tpot /data/elk -R
}

# Let's create a function to clean up and prepare fatt data
fuFATT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/fatt/*; fi
  mkdir -p /data/fatt/log
  chmod 770 -R /data/fatt
  chown tpot:tpot -R /data/fatt
}

# Let's create a function to clean up and prepare glutton data
fuGLUTTON () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/glutton/*; fi
  mkdir -p /data/glutton/log
  chmod 770 /data/glutton -R
  chown tpot:tpot /data/glutton -R
}

# Let's create a function to clean up and prepare heralding data
fuHERALDING () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/heralding/*; fi
  mkdir -p /data/heralding/log
  chmod 770 /data/heralding -R
  chown tpot:tpot /data/heralding -R
}

# Let's create a function to clean up and prepare honeypy data
fuHONEYPY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeypy/*; fi
  mkdir -p /data/honeypy/log
  chmod 770 /data/honeypy -R
  chown tpot:tpot /data/honeypy -R
}

# Let's create a function to clean up and prepare honeysap data
fuHONEYSAP () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeysap/*; fi
  mkdir -p /data/honeysap/log
  chmod 770 /data/honeysap -R
  chown tpot:tpot /data/honeysap -R
}

# Let's create a function to clean up and prepare honeytrap data
fuHONEYTRAP () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/honeytrap/*; fi
  mkdir -p /data/honeytrap/log/ /data/honeytrap/attacks/ /data/honeytrap/downloads/
  chmod 770 /data/honeytrap/ -R
  chown tpot:tpot /data/honeytrap/ -R
}

# Let's create a function to clean up and prepare ipphoney data
fuIPPHONEY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/ipphoney/*; fi
  mkdir -p /data/ipphoney/log
  chmod 770 /data/ipphoney -R
  chown tpot:tpot /data/ipphoney -R
}

# Let's create a function to clean up and prepare mailoney data
fuMAILONEY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/mailoney/*; fi
  mkdir -p /data/mailoney/log/
  chmod 770 /data/mailoney/ -R
  chown tpot:tpot /data/mailoney/ -R
}

# Let's create a function to clean up and prepare medpot data
fuMEDPOT () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/medpot/*; fi
  mkdir -p /data/medpot/log/
  chmod 770 /data/medpot/ -R
  chown tpot:tpot /data/medpot/ -R
}

# Let's create a function to clean up nginx logs
fuNGINX () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/nginx/log/*; fi
  touch /data/nginx/log/error.log
  chmod 644 /data/nginx/conf -R
  chmod 644 /data/nginx/cert -R
}

# Let's create a function to clean up and prepare rdpy data
fuRDPY () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/rdpy/*; fi
  mkdir -p /data/rdpy/log/
  chmod 770 /data/rdpy/ -R
  chown tpot:tpot /data/rdpy/ -R
}

# Let's create a function to prepare spiderfoot db
fuSPIDERFOOT () {
  mkdir -p /data/spiderfoot
  touch /data/spiderfoot/spiderfoot.db
  chmod 770 -R /data/spiderfoot
  chown tpot:tpot -R /data/spiderfoot
}

# Let's create a function to clean up and prepare suricata data
fuSURICATA () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/suricata/*; fi
  mkdir -p /data/suricata/log
  chmod 770 -R /data/suricata
  chown tpot:tpot -R /data/suricata
}

# Let's create a function to clean up and prepare p0f data
fuP0F () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/p0f/*; fi
  mkdir -p /data/p0f/log
  chmod 770 -R /data/p0f
  chown tpot:tpot -R /data/p0f
}

# Let's create a function to clean up and prepare tanner data
fuTANNER () {
  if [ "$myPERSISTENCE" != "on" ]; then rm -rf /data/tanner/*; fi
  mkdir -p /data/tanner/log /data/tanner/files
  chmod 770 -R /data/tanner
  chown tpot:tpot -R /data/tanner
}

# Avoid unwanted cleaning
if [ "$myPERSISTENCE" = "" ]; then
  echo $myRED"!!! WARNING !!! - This will delete ALL honeypot logs. "$myWHITE
  while [ "$myQST" != "y" ] && [ "$myQST" != "n" ]; do
    read -p "Continue? (y/n) " myQST
  done
  if [ "$myQST" = "n" ]; then
    echo $myGREEN"Puuh! That was close! Aborting!"$myWHITE
    exit
  fi
fi

# Check persistence, if enabled compress and rotate logs
if [ "$myPERSISTENCE" = "on" ]; then
  echo "Persistence enabled, now rotating and compressing logs."
  fuLOGROTATE
else
  echo "Cleaning up and preparing data folders."
  fuADBHONEY
  fuCISCOASA
  fuCITRIXHONEYPOT
  fuCONPOT
  fuCOWRIE
  fuDICOMPOT
  fuDIONAEA
  fuELASTICPOT
  fuELK
  fuFATT
  fuGLUTTON
  fuHERALDING
  fuHONEYSAP
  fuHONEYPY
  fuHONEYTRAP
  fuIPPHONEY
  fuMAILONEY
  fuMEDPOT
  fuNGINX
  fuRDPY
  fuSPIDERFOOT
  fuSURICATA
  fuP0F
  fuTANNER
fi
dtag-dev-sec/tpotce
bin/clean.sh
Shell
gpl-3.0
10,881
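The script's behavior hinges on its single argument; a hypothetical invocation sketch, with the argument semantics inferred from the code above:

./bin/clean.sh        # no argument: prompts before deleting all honeypot logs
./bin/clean.sh on     # persistence on: rotate and compress logs instead of deleting
./bin/clean.sh off    # any other value: wipe and re-create the /data folder structure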
#!/bin/bash
#$ -cwd
#$ -l rt_F=1
#$ -l h_rt=01:00:00
#$ -N benchmarker-deepcam
#$ -j y
#$ -o $JOB_NAME.o$JOB_ID

# run with: qsub -g gcb50300 <this file>

source /etc/profile.d/modules.sh
module load gcc/9.3.0
module load python/3.8/3.8.7
module load cuda/11.2/11.2.2

python3 -m benchmarker \
    --gpus=0 \
    --framework=pytorch \
    --problem=deepcam \
    --problem_size=1600 \
    --nb_epoch=5 \
    --batch_size=32
undertherain/benchmarker
scripts/abci/deepcam-submit.sh
Shell
mpl-2.0
447
#!/bin/bash

# check required environment variables are there
: ${ANDROID_HOME:?"Need to set ANDROID_HOME non-empty"}

if [ -d "src/platforms/android" ]
then
    echo "Android already prepared, skipping."
else
    # Work in the src directory
    cd src

    # add android platform
    ./node/node ./node_modules/.bin/cordova platform add https://github.com/apache/cordova-android#23fd0982b0faa6b7e169c2946eab07930f1f4d16

    # copy java sources
    if [ -d "platforms/android/src/org/extendedmind/nightly" ]
    then
        cp -f assets/build/android/*.java platforms/android/src/org/extendedmind/nightly/
    else
        cp -f assets/build/android/*.java platforms/android/src/org/extendedmind/
    fi

    # Delete generated drawable directories to avoid Cordova splash from appearing anywhere
    rm -rf platforms/android/res/drawable*
    drawables=( "-hdpi" "-mdpi" "-xhdpi" "-xxhdpi" "-xxxhdpi" )
    for item in "${drawables[@]}"
    do
        # create directory
        mkdir platforms/android/res/drawable${item}
        # icon
        cp -f assets/icons/android/*${item}.png platforms/android/res/drawable${item}/icon.png
        # splash
        cp -f assets/screens/android/*${item}.png platforms/android/res/drawable${item}/splash.9.png
    done

    # Use XHDPI as default size
    mkdir platforms/android/res/drawable
    cp -f assets/icons/android/icon-96-xhdpi.png platforms/android/res/drawable/icon.png
    cp -f assets/screens/android/launch-xhdpi.png platforms/android/res/drawable/splash.9.png

    # Copy reminder icon
    cp -f assets/icons/android/ic_popup_reminder.png platforms/android/res/drawable/

    # copy build properties
    cp -f assets/build/android/build-extras.gradle platforms/android/
fi
ttiurani/extendedmind
frontend/cordova/prepare-android.sh
Shell
agpl-3.0
1,656
#!/bin/bash

echo "Starting chld4.sh"

#while [[ 1 == 1 ]]; do
#    sleep 1
#done

#
# Forkless sleep
#
read < ./test25_sleep.fifo
endurox-dev/endurox
atmitest/test025_cpmsrv/chld4.sh
Shell
agpl-3.0
138
#!/bin/bash
#
# ScreenInvader - A shared media experience. Instant and seamless.
#   Copyright (C) 2012 Amir Hassan <[email protected]>
#
#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation; either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program; if not, write to the Free Software
#   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

function black       { echo -ne "\033[0;30m$1\033[0m"; tput sgr0; }
function blue        { echo -ne "\033[0;34m$1\033[0m"; tput sgr0; }
function green       { echo -ne "\033[0;32m$1\033[0m"; tput sgr0; }
function cyan        { echo -ne "\033[0;36m$1\033[0m"; tput sgr0; }
function red         { echo -ne "\033[1;31m$1\033[0m"; tput sgr0; }
function purple      { echo -ne "\033[0;35m$1\033[0m"; tput sgr0; }
function brown       { echo -ne "\033[0;33m$1\033[0m"; tput sgr0; }
function lightgray   { echo -ne "\033[0;37m$1\033[0m"; tput sgr0; }
function lightgreen  { echo -ne "\033[1;32m$1\033[0m"; tput sgr0; }
function lightcyan   { echo -ne "\033[1;36m$1\033[0m"; tput sgr0; }
function lightred    { echo -ne "\033[0;31m$1\033[0m"; tput sgr0; }
function lightpurple { echo -ne "\033[1;35m$1\033[0m"; tput sgr0; }
function yellow      { echo -ne "\033[1;33m$1\033[0m"; tput sgr0; }
function white       { echo -ne "\033[1;37m$1\033[0m"; tput sgr0; }

function warn   { red "$1\n"; }
function ok     { green "ok\n"; }
function failed { red "failed\n"; }

function verbose     { [ ! -z $VERBOSE ] && echo "$@ " 1>&2; }
function verboseexec { verbose $@; bash -c "$@"; }

function error {
  msg=$1
  errcode=$2
  [ -z "$errcode" ] && errcode=1
  [ -z "$msg" ] && failed || red "$msg\n"
  exit $errcode
}

function try {
  fatal=true
  targetfd=2
  OPTIND=0
  while getopts 'no:' c
  do
    case $c in
      n)  fatal="false";;
      o)  targetfd="$OPTARG";;
      \?) echo "Invalid option: -$OPTARG" >&2;;
    esac
  done
  shift $((OPTIND-1))

  errcode=0
  msg="$1"
  echo -en "$msg: " 1>&2
  shift
  verbose $@
  bash -c "$@ 2>&1 1>&$targetfd"
  errcode=$?

  if [ $errcode != 0 ]; then
    [ "$fatal" != "true" ] && failed 1>&2 || error 1>&2
  else
    ok 1>&2
  fi
  return $errcode
}

function check {
  log="&1"
  if [ -n "$BOOTSTRAP_LOG" ]; then
    echo "### $1" >> "$BOOTSTRAP_LOG"
    try -o "1" "$1" "$2" >>"$BOOTSTRAP_LOG"
  else
    try "$1" "$2"
  fi
  [ $? -ne 0 ] && exit $?
}

function checkcat { try -n -o "1" "$1" "echo; $2"; }
function trycat   { try -o "1" "$1" "$2"; }

function absDir() {
  dir="`dirname $1`"
  absdir="`cd $dir; pwd`"
  echo $absdir
}

function absPath() {
  dir="`dirname $1`"
  base="`basename $1`"
  absdir="`cd $dir; pwd`"
  echo $absdir/$base
}

export -f check try error ok failed red green yellow verbose
screeninvader/ScreenInvader
.functions.sh
Shell
agpl-3.0
3,150
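A hypothetical usage sketch (the wrapped commands are placeholders): source the helpers and wrap commands so each prints "label: ok" or "label: failed" on stderr.

#!/bin/bash
source ./.functions.sh

try "creating scratch dir" "mkdir -p /tmp/scratch"   # fatal on failure
try -n "optional cleanup" "rm -f /tmp/scratch/old"   # -n makes failure non-fatal
check "writing marker" "echo ready > /tmp/scratch/marker"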
#!/bin/bash

myoutsideip=$(curl https://api.ipify.org 2>/dev/null)
ipipapi=$(curl -L http://freeapi.ipip.net/?ip="$myoutsideip" 2>/dev/null)
myinsideip=$(curl myip.ipip.net 2>/dev/null)
mylanip=$(curl ip.cn 2>/dev/null)

echo "Your internal address: $mylanip"
echo "Your external address: $myinsideip"
echo "Your current overseas exit IP is: $myoutsideip,$ipipapi"

sleep 5
kmahyyg/learn_py3
MyIP.sh
Shell
agpl-3.0
358
#!/bin/sh

APP_PATH=~/Workspace/passbolt

rm -fr $APP_PATH/app/Locale/default.pot
$APP_PATH/app/Console/cake i18n extract --paths $APP_PATH/app/webroot/js/passbolt/,$APP_PATH/app/webroot/js/lb/, --app $APP_PATH/app/ --merge yes --output $APP_PATH/app/Locale
mv $APP_PATH/app/Locale/default.pot $APP_PATH/app/Locale/jsDictionnary.pot
et304383/passbolt_api
app/Console/build_po.sh
Shell
agpl-3.0
335
#!/bin/bash

set -eu

read -p "This will remove all node server and client modules. Are you sure? " -n 1 -r

if [[ "$REPLY" =~ ^[Yy]$ ]]; then
    rm -rf node_modules client/node_modules
fi
Green-Star/PeerTube
scripts/danger/clean/modules.sh
Shell
agpl-3.0
188
#!/bin/sh
# Parameters:
# $1 = source dir
# $2 = dst dir
# $top_builddir = well you guessed it

die_if_not_dir() {
    local dir
    for dir in "$@"; do
        test -d "$dir" && continue
        echo "Error: '$dir' is not a directory"
        exit 1
    done
}

# Ensure that created dirs/files have 755/644 perms
umask 022

# Sanity tests
die_if_not_dir "$1"
mkdir -p "$2" 2>/dev/null
die_if_not_dir "$2"
die_if_not_dir "$top_builddir"

# Just copy (no sanitization) some kernel headers.

eval `grep ^KERNEL_HEADERS "$top_builddir/.config"`
if ! test "$KERNEL_HEADERS" \
|| ! test -d "$KERNEL_HEADERS/asm" \
|| ! test -d "$KERNEL_HEADERS/linux" \
; then
    echo "Error: '$KERNEL_HEADERS' is not a directory containing kernel headers."
    echo "Check KERNEL_HEADERS= in your .config file."
    exit 1
fi

# Do the copying only if src and dst dirs are not the same.
# Be thorough: do not settle just for textual compare,
# and guard against "pwd" being handled as shell builtin.
# Double quoting looks weird, but it works (even bbox ash too).
if test "`(cd "$KERNEL_HEADERS"; env pwd)`" != "`(cd "$2"; env pwd)`"; then
    # NB: source or target files and directories may be symlinks,
    # and for all we know, good reasons.
    # We must work correctly in these cases. This includes "do not replace
    # target symlink with real directory" rule. So, no rm -rf here please.
    mkdir -p "$2/asm" 2>/dev/null
    mkdir -p "$2/linux" 2>/dev/null
    # Exists, but is not a dir? That's bad, bail out
    die_if_not_dir "$2/asm" "$2/linux"
    # cp -HL creates regular destination files even if sources are symlinks.
    # This is intended.
    # (NB: you need busybox 1.11.x for this. earlier ones are slightly buggy)
    cp -RHL "$KERNEL_HEADERS/asm"/* "$2/asm" || exit 1
    cp -RHL "$KERNEL_HEADERS/linux"/* "$2/linux" || exit 1
    # Linux 2.4 doesn't have it
    if test -d "$KERNEL_HEADERS/asm-generic"; then
        mkdir -p "$2/asm-generic" 2>/dev/null
        die_if_not_dir "$2/asm-generic"
        cp -RHL "$KERNEL_HEADERS/asm-generic"/* "$2/asm-generic" || exit 1
    fi
    # For paranoid reasons, we use explicit list of directories
    # which may be here. List last updated for linux-2.6.27:
    for dir in drm mtd rdma sound video; do
        if test -d "$KERNEL_HEADERS/$dir"; then
            mkdir -p "$2/$dir" 2>/dev/null
            die_if_not_dir "$2/$dir"
            cp -RHL "$KERNEL_HEADERS/$dir"/* "$2/$dir" || exit 1
        fi
    done
    if ! test -f "$2/linux/version.h"; then
        echo "Warning: '$KERNEL_HEADERS/linux/version.h' is not found"
        echo "in kernel headers directory specified in .config."
        echo "Some programs won't like that. Consider fixing it by hand."
    fi
fi

# Fix mode/owner bits
cd "$2" || exit 1
chmod -R u=rwX,go=rX . >/dev/null 2>&1
chown -R `id | sed 's/^uid=\([0-9]*\).*gid=\([0-9]*\).*$/\1:\2/'` . >/dev/null 2>&1
fallmq/uClibc-0.9.30.1
extra/scripts/install_kernel_headers.sh
Shell
lgpl-2.1
2,725
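A hypothetical invocation sketch (paths assumed): the script expects $top_builddir to be exported and to contain a .config with a KERNEL_HEADERS= line.

export top_builddir=$HOME/uclibc-build   # assumed build root containing .config
./extra/scripts/install_kernel_headers.sh "$top_builddir" /tmp/sanitized-headers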
#!/bin/bash

source `dirname $0`/utils.sh

NGINX_FILES=$APP_ROOT/nginx_runtime
kill -QUIT $( cat $NGINX_FILES/logs/nginx.pid )
echo "nginx stopped!"
supermicah/zeus
bin/stop.sh
Shell
lgpl-2.1
151
#!/bin/bash
# plink can be installed with
# $ sudo apt-get install putty-tools

if [[ $# -lt 6 || $# -gt 7 ]]; then
    echo " * Reconnect a Sumo to a given router instead of being an access point."
    echo " * Synopsis: $0 WLAN_INTERFACE WLAN_ESSID WLAN_ROUTER_IP WLAN_CON_NAME SUMO_ESSID SUMO_NEW_IP [WLAN_WEP_KEY]"
    echo " * Example: $0 wlan0 MyWifi 192.168.1.1 MyWifi0 JumpingSumo-XXX 192.168.2.10 XXXMYKEYXXX"
    echo "WLAN_INTERFACE is the name of the wireless interface,"
    echo "  it can be obtained with '$ ifconfig'"
    echo "WLAN_ESSID is the ESSID of the wireless network,"
    echo "  it can be obtained with '$ nmcli --pretty con show --active'"
    echo "WLAN_CON_NAME is the name of the connection in Network,"
    echo "  it can be obtained with '$ nm-connection-editor'"
    echo "WLAN_ROUTER_IP is the IP of the router,"
    echo "  it can be shown with '$ ip route show | grep -i 'default via'"
    exit -1
fi

WLAN_INTERFACE=$1
WLAN_ESSID=$2
WLAN_ROUTER_IP=$3
WLAN_CON_NAME=$4
SUMO_ESSID=$5
SUMO_NEW_IP=$6
WLAN_WEP_KEY=${7:-""}
set -e

echo; echo " * Checking if interface '$WLAN_INTERFACE' is connected..."
ISCONNECTED=`nmcli con show --active | grep $WLAN_INTERFACE | wc -l`
if [[ "$ISCONNECTED" > 0 ]]; then
    echo " * Disconnecting from interface '$WLAN_INTERFACE'..."
    nmcli dev disconnect $WLAN_INTERFACE
fi

echo; printf " * Waiting for network '$SUMO_ESSID' "
LC=`nmcli d wifi list | grep "$SUMO_ESSID" | wc -l`
while [[ $LC == 0 ]] ; do
    LC=`nmcli d wifi list | grep "$SUMO_ESSID" | wc -l`
    printf "."
    sleep 1
done
echo

echo ; echo " * Connecting to network '$SUMO_ESSID' ..."
nmcli --pretty con up id "$SUMO_ESSID"

echo " * Sending instructions to reconnect on '$WLAN_ESSID'..."
INSTRFILE=`mktemp /tmp/sumoXXX.txt`
INSTR="sleep 2 ; iwconfig wifi_bcm mode managed essid \"$WLAN_ESSID\""
#~ if [ ! -z WLAN_WEP_KEY ]; then
#~     INSTR="$INSTR key s:$WLAN_WEP_KEY"
#~ fi
INSTR="$INSTR ; ifconfig wifi_bcm $SUMO_NEW_IP netmask 255.255.255.0 up"
INSTR="$INSTR ; route add default gw $WLAN_ROUTER_IP"
echo $INSTR > $INSTRFILE
#~ cat $INSTRFILE
set +e # timeout returns an error
timeout 5 sh -c "plink -telnet 192.168.2.1 < $INSTRFILE"
set -e

echo ; echo " * Reconnecting with connection '$WLAN_CON_NAME'"
nmcli --pretty con up id "$WLAN_CON_NAME"
#~ nmcli -p d wifi connect "$WLAN_ESSID" password $WLAN_WEP_KEY iface wlan0

echo ; echo " * Cleaning..."
rm $INSTRFILE

echo ; echo " * Pinging $SUMO_NEW_IP for 3 seconds"
ping -w 3 $SUMO_NEW_IP
arnaud-ramey/rossumo
scripts/sumo2router_client.bash
Shell
lgpl-3.0
2,488
#!/usr/bin/env bash

# This script queries and modifies CPU scaling frequencies to produce more
# accurate benchmark results. To move from a low-energy C-state to a
# higher one, run 'governor.sh performance'. To move back to a low power state
# run 'governor.sh powersave' or reboot. The script is based on code by
# Andy Polyakov, http://www.openssl.org/~appro/cryptogams/.

# Fixup ancient Bash
# https://unix.stackexchange.com/q/468579/56041
if [[ -z "$BASH_SOURCE" ]]; then
    BASH_SOURCE="$0"
fi

if [[ "$EUID" -ne 0 ]]; then
    echo "This script must be run as root"
    [[ "$0" = "$BASH_SOURCE" ]] && exit 1 || return 1
fi

if [ "x$1" = "x" ]; then
    echo "usage: $0 on[demand]|pe[rformance]|po[wersave]|us[erspace]?"
    [[ "$0" = "$BASH_SOURCE" ]] && exit 1 || return 1
fi

# "on demand" may result in an "invalid write argument" or similar
case $1 in
    on*|de*) governor="ondemand";;
    po*|pw*) governor="powersave";;
    pe*)     governor="performance";;
    us*)     governor="userspace";;
    \?)      ;;
    *)       echo "$1: unrecognized governor";;
esac

if [ -z "$governor" ]; then
    [[ "$0" = "$BASH_SOURCE" ]] && exit 1 || return 1
fi

cpus=$(ls /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor 2>/dev/null)
if [ -z "$cpus" ]; then
    echo "Failed to read CPU system device tree"
    [[ "$0" = "$BASH_SOURCE" ]] && exit 1 || return 1
fi

echo "Current CPU governor scaling settings:"
count=0
for cpu in $cpus; do
    echo "  CPU $count:" $(cat "$cpu")
    ((count++))
done

if [ "x$governor" != "x" ]; then
    for cpu in $cpus; do
        echo $governor > $cpu
    done
fi

echo "New CPU governor scaling settings:"
count=0
for cpu in $cpus; do
    echo "  CPU $count:" $(cat "$cpu")
    ((count++))
done

[[ "$0" = "$BASH_SOURCE" ]] && exit 0 || return 0
smessmer/cryfs
vendor/cryptopp/vendor_cryptopp/TestScripts/governor.sh
Shell
lgpl-3.0
1,795
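A typical benchmarking session, with usage inferred from the script's own help text:

sudo ./governor.sh performance   # pin all CPUs to the performance governor
# ... run benchmarks ...
sudo ./governor.sh powersave     # restore a low-power governor afterwards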
#!/bin/sh

. /opt/pyrame/ports.sh

if test $# -lt 7
then
    echo "usage $0 matrix_id s1 s2 s3 a1 a2 a3 [strategy]"
    exit 1
fi

chkpyr2.py localhost $PATHS_PORT next_matrix_paths $@
sbinet-staging/pyrame
motion/cmd_paths/next_matrix.sh
Shell
lgpl-3.0
179
#!/usr/bin/env bash
set -e

PT_OPWD=$PWD
cd -- "$(dirname "$(readlink "$0" || printf '%s\n' "$0")")"

source ./utils.lib.bash

fail_wrong_usage() {
    printf >&2 '%s\n' "$*"
    printf >&2 '%s\n' "USAGE: $0 <build root> [{run:<suite> | skip:<suite>}...]"
    exit 2
}

if (( $# < 1 )); then
    fail_wrong_usage "No build root passed."
fi

PT_BUILD_DIR=$(resolve_relative "$1" "$PT_OPWD")
# Shift the build root
shift

PT_SOURCE_DIR=..

case "$PT_TOOL" in
'')
    PT_PREFIX=()
    ;;
valgrind)
    PT_PREFIX=( valgrind -q --error-exitcode=42 )
    ;;
*)
    echo >&2 "$0: Unknown PT_TOOL: $PT_TOOL."
    exit 2
    ;;
esac

PT_LUASTATUS=( "$PT_BUILD_DIR"/luastatus/luastatus ${DEBUG:+-l trace} )
PT_PARROT=$PT_BUILD_DIR/tests/parrot
PT_WIDGET_FILES=()
PT_FILES_TO_REMOVE=()
PT_DIRS_TO_REMOVE=()
declare -A PT_SPAWNED_THINGS=()
declare -A PT_SPAWNED_THINGS_FDS_0=()
declare -A PT_SPAWNED_THINGS_FDS_1=()
PT_LINE=

source ./pt_stopwatch.lib.bash

# Sanity checks
if ! [[ -x "$PT_BUILD_DIR"/luastatus/luastatus ]]; then
    echo >&2 "FATAL ERROR: '$PT_BUILD_DIR/luastatus/luastatus' is not an executable file."
    echo >&2 "Is '$PT_BUILD_DIR' the correct build root? Did you forget to build the project?"
    exit 1
fi
if ! [[ -x "$PT_PARROT" ]]; then
    echo >&2 "FATAL ERROR: '$PT_PARROT' is not an executable file."
    echo >&2 "Did you forget to pass '-DBUILD_TESTS=ON' to cmake?"
    exit 1
fi

pt_stack_trace() {
    echo >&2 "Stack trace:"
    local n=${#FUNCNAME[@]}
    local i=${1:-1}
    for (( ; i < n; ++i )); do
        local func=${FUNCNAME[$i]:-MAIN}
        local lineno=${BASH_LINENO[(( i - 1 ))]}
        local src=${BASH_SOURCE[$i]:-'???'}
        echo >&2 "  in $src:$lineno (function $func)"
    done
}

pt_fail_internal_error() {
    printf >&2 '%s\n' '=== INTERNAL ERROR ===' "$@"
    pt_stack_trace 2
    exit 3
}

pt_fail() {
    printf >&2 '%s\n' '=== FAILED ===' "$@"
    pt_stack_trace 2
    exit 1
}

pt_write_widget_file() {
    local f
    f=$(mktemp --suffix=.lua)
    PT_WIDGET_FILES+=("$f")
    PT_FILES_TO_REMOVE+=("$f")
    cat > "$f"
}

pt_add_file_to_remove() {
    PT_FILES_TO_REMOVE+=("$1")
}

pt_add_dir_to_remove() {
    PT_DIRS_TO_REMOVE+=("$1")
}

pt_add_fifo() {
    rm -f "$1"
    mkfifo "$1"
    pt_add_file_to_remove "$1"
}

pt_read_line() {
    echo >&2 "Reading line..."
    IFS= read -r PT_LINE || pt_fail "pt_read_line: cannot read next line (process died?)"
}

pt_expect_line() {
    echo >&2 "Expecting line “$1”..."
    IFS= read -r PT_LINE || pt_fail "expect_line: cannot read next line (process died?)"
    if [[ "$PT_LINE" != "$1" ]]; then
        pt_fail "pt_expect_line: line does not match" "Expected: '$1'" "Found: '$PT_LINE'"
    fi
}

pt_spawn_thing() {
    local k=$1
    shift
    local pid=${PT_SPAWNED_THINGS[$k]}
    if [[ -n $pid ]]; then
        pt_fail_internal_error "pt_spawn_thing: thing '$k' has already been spawned (PID $pid)."
    fi
    "$@" &
    pid=$!
    PT_SPAWNED_THINGS[$k]=$pid
}

pt_spawn_thing_pipe() {
    local k=$1
    shift
    local pid=${PT_SPAWNED_THINGS[$k]}
    if [[ -n $pid ]]; then
        pt_fail_internal_error "pt_spawn_thing_pipe: thing '$k' has already been spawned (PID $pid)."
    fi
    local fifo_in=./_internal-tmpfifo-$k-in
    local fifo_out=./_internal-tmpfifo-$k-out
    pt_add_fifo "$fifo_in"
    pt_add_fifo "$fifo_out"
    "$@" >"$fifo_out" <"$fifo_in" &
    pid=$!
    PT_SPAWNED_THINGS[$k]=$pid
    exec {PT_SPAWNED_THINGS_FDS_0[$k]}<"$fifo_out"
    exec {PT_SPAWNED_THINGS_FDS_1[$k]}>"$fifo_in"
}

pt_has_spawned_thing() {
    local k=$1
    [[ -n "${PT_SPAWNED_THINGS[$k]}" ]]
}

pt_close_fd() {
    local fd=$(( $1 ))
    exec {fd}>&-
}

pt_close_thing_fds() {
    local k=$1
    local fd
    for fd in "${PT_SPAWNED_THINGS_FDS_0[$k]}" "${PT_SPAWNED_THINGS_FDS_1[$k]}"; do
        if [[ -n $fd ]]; then
            pt_close_fd "$fd"
        fi
    done
    unset PT_SPAWNED_THINGS_FDS_0[$k]
    unset PT_SPAWNED_THINGS_FDS_1[$k]
}

pt_wait_thing() {
    local k=$1
    local pid=${PT_SPAWNED_THINGS[$k]}
    if [[ -z $pid ]]; then
        pt_fail_internal_error "pt_wait_thing: unknown thing '$k' (PT_SPAWNED_THINGS has no such key)"
    fi
    echo >&2 "Waiting for '$k' (PID $pid)..."
    local c=0
    wait "${PT_SPAWNED_THINGS[$k]}" || c=$?
    unset PT_SPAWNED_THINGS[$k]
    pt_close_thing_fds "$k"
    return -- "$c"
}

pt_kill_thing() {
    local k=$1
    local pid=${PT_SPAWNED_THINGS[$k]}
    if [[ -n $pid ]]; then
        kill "$pid" || pt_fail "Cannot kill '$k' (PID $pid)."
        wait "$pid" || true
    fi
    unset PT_SPAWNED_THINGS[$k]
    pt_close_thing_fds "$k"
}

pt_spawn_luastatus() {
    pt_spawn_thing luastatus \
        "${PT_PREFIX[@]}" "${PT_LUASTATUS[@]}" -b ./barlib-mock.so "$@" "${PT_WIDGET_FILES[@]}"
}

pt_spawn_luastatus_for_barlib_test_via_runner() {
    local runner=$1
    shift
    pt_spawn_thing_pipe luastatus \
        "$runner" "${PT_PREFIX[@]}" "${PT_LUASTATUS[@]}" "$@" "${PT_WIDGET_FILES[@]}"
}

pt_spawn_luastatus_directly() {
    pt_spawn_thing luastatus \
        "${PT_PREFIX[@]}" "${PT_LUASTATUS[@]}" "$@" "${PT_WIDGET_FILES[@]}"
}

pt_wait_luastatus() {
    pt_wait_thing luastatus
}

pt_kill_luastatus() {
    pt_kill_thing luastatus
}

pt_kill_everything() {
    local k
    for k in "${!PT_SPAWNED_THINGS[@]}"; do
        pt_kill_thing "$k"
    done
}

pt_testcase_begin() {
    true
}

pt_testcase_end() {
    pt_kill_luastatus
    pt_kill_everything
    local x
    for x in "${PT_FILES_TO_REMOVE[@]}"; do
        rm -f "$x"
    done
    for x in "${PT_DIRS_TO_REMOVE[@]}"; do
        rmdir "$x"
    done
    PT_FILES_TO_REMOVE=()
    PT_DIRS_TO_REMOVE=()
    PT_WIDGET_FILES=()
}

trap '
    for k in "${!PT_SPAWNED_THINGS[@]}"; do
        pid=${PT_SPAWNED_THINGS[$k]}
        echo >&2 "Killing “$k” (PID $pid)."
        kill "$pid" || true
    done
' EXIT

pt_run_test_case() {
    echo >&2 "==> Invoking file '$1'..."
    source "$1"
}

pt_run_test_suite() {
    if ! [[ -d $1 ]]; then
        pt_fail_internal_error "'$1' is not a directory."
    fi
    echo >&2 "==> Listing files in '$1'..."
    local f
    for f in "$1"/*; do
        if [[ $f != *.lib.bash ]]; then
            pt_fail_internal_error "File '$f' does not have suffix '.lib.bash'."
        fi
        if [[ ${f##*/} != [0-9][0-9]-* ]]; then
            pt_fail_internal_error "File '$f' does not have prefix of two digits and a dash (e.g. '99-testcase.lib.bash')."
        fi
        pt_run_test_case "$f"
    done
}

pt_main() {
    local args_run=()
    local -A args_skip=()
    local arg
    for arg in "$@"; do
        if [[ $arg == */* ]]; then
            fail_wrong_usage "Invalid argument: '$arg': suite name can not contain a slash."
        fi
        if [[ $arg != *:* ]]; then
            fail_wrong_usage "Invalid argument: '$arg': is not of key:value form."
        fi
        local k=${arg%%:*}
        local v=${arg#*:}
        if [[ -z "$v" ]]; then
            fail_wrong_usage "Invalid argument: '$arg': value is empty."
        fi
        case "$k" in
        run)
            args_run+=("$v")
            ;;
        skip)
            args_skip[$v]=1
            ;;
        *)
            fail_wrong_usage "Invalid argument: '$arg': unknown key '$k'."
            ;;
        esac
    done

    if (( ${#args_run[@]} != 0 )); then
        local x
        for x in "${args_run[@]}"; do
            local d=./pt_tests/$x
            if [[ -n "${args_skip[$x]}" ]]; then
                echo >&2 "==> Skipping test suite '$d'."
                continue
            fi
            pt_run_test_suite "$d"
        done
    else
        local d
        for d in ./pt_tests/*; do
            local x=${d##*/}
            if [[ -n "${args_skip[$x]}" ]]; then
                echo >&2 "==> Skipping test suite '$d'."
                continue
            fi
            pt_run_test_suite "$d"
        done
    fi
}

pt_main "$@"
echo >&2 "=== PASSED ==="
shdown/luastatus
tests/pt.sh
Shell
lgpl-3.0
7,943
#!/bin/bash

# Root folder where TurboParser is installed.
root_folder="`cd $(dirname $0);cd ..;pwd`"
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${root_folder}/deps/local/lib"

# Set options.
language=$1 # Example: "slovene" or "english_proj".
train_algorithm=svm_mira # Training algorithm.
train_algorithm_pruner=crf_mira # Training algorithm for pruner.
num_epochs=10 # Number of training epochs.
num_epochs_pruner=10 # Number of training epochs for the pruner.
regularization_parameter=0.001 # The C parameter in MIRA.
regularization_parameter_pruner=1e12 # Same for the pruner.
train=true
test=true
prune=true # This will revert to false if model_type=basic.
train_external_pruner=false # If true, the pruner is trained separately.
posterior_threshold=0.0001 # Posterior probability threshold for the pruner.
pruner_max_heads=10 # Maximum number of candidate heads allowed by the pruner.
labeled=true # Output dependency labels.
large_feature_set=true # Use a large feature set (slower but more accurate).
case_sensitive=false # Distinguish word upper/lower case.
form_cutoff=0 # Cutoff in word occurrence.
lemma_cutoff=0 # Cutoff in lemma occurrence.
model_type=standard # Parts used in the model (subset of "af+cs+gp+as+hb+np+dp+gs+ts").
                    # Some shortcuts are: "standard" (means "af+cs+gp");
                    # "basic" (means "af"); and "full" (means "af+cs+gp+as+hb+gs+ts").
                    # Currently, flags np+dp are not recommended because they
                    # make the parser a lot slower.

if [ "${model_type}" == "basic" ]
then
    echo "Reverting prune to false..."
    prune=false
fi

suffix=parser_pruned-${prune}_model-${model_type}
suffix_pruner=parser_pruner_C-${regularization_parameter_pruner}

# Set path folders.
path_bin=${root_folder} # Folder containing the binary.
path_scripts=${root_folder}/scripts # Folder containing scripts.
path_data=${root_folder}/data/${language} # Folder with the data.
path_models=${root_folder}/models/${language} # Folder where models are stored.
path_results=${root_folder}/results/${language} # Folder for the results.

# Create folders if they don't exist.
mkdir -p ${path_data}
mkdir -p ${path_models}
mkdir -p ${path_results}

# Set file paths. Allow multiple test files.
file_model=${path_models}/${language}_${suffix}.model
file_pruner_model=${path_models}/${language}_${suffix_pruner}.model
file_results=${path_results}/${language}_${suffix}.txt
file_pruner_results=${path_results}/${language}_${suffix_pruner}.txt

if [ "$language" == "english_proj" ] || [ "$language" == "english_proj_stanford" ]
then
    file_train_orig=${path_data}/${language}_train.conll.predpos
    files_test_orig[0]=${path_data}/${language}_test.conll
    files_test_orig[1]=${path_data}/${language}_dev.conll
    files_test_orig[2]=${path_data}/${language}_test.conll.predpos
    files_test_orig[3]=${path_data}/${language}_dev.conll.predpos
    file_train=${path_data}/${language}_ftags_train.conll.predpos
    files_test[0]=${path_data}/${language}_ftags_test.conll
    files_test[1]=${path_data}/${language}_ftags_dev.conll
    files_test[2]=${path_data}/${language}_ftags_test.conll.predpos
    files_test[3]=${path_data}/${language}_ftags_dev.conll.predpos
    rm -f ${file_train}
    awk 'NF>0{OFS="\t";NF=10;$4=$5;$5=$5;print}NF==0{print}' ${file_train_orig} \
        > ${file_train}
    for (( i=0; i<${#files_test[*]}; i++ ))
    do
        file_test_orig=${files_test_orig[$i]}
        file_test=${files_test[$i]}
        rm -f ${file_test}
        awk 'NF>0{OFS="\t";NF=10;$4=$5;$5=$5;print}NF==0{print}' ${file_test_orig} \
            > ${file_test}
    done
elif [ "$language" == "english" ]
then
    file_train=${path_data}/${language}_train.conll
    files_test[0]=${path_data}/${language}_test.conll
    files_test[1]=${path_data}/${language}_dev.conll
elif [ "$language" == "dutch" ]
then
    file_train=${path_data}/${language}_train.conll
    files_test[0]=${path_data}/${language}_test.conll
else
    # For all languages except english and dutch,
    # replace coarse tags by fine tags.
    file_train_orig=${path_data}/${language}_train.conll
    file_test_orig=${path_data}/${language}_test.conll
    file_train=${path_data}/${language}_ftags_train.conll
    file_test=${path_data}/${language}_ftags_test.conll
    rm -f ${file_train} ${file_test}
    awk 'NF>0{OFS="\t";NF=10;$4=$5;$5=$5;print}NF==0{print}' ${file_train_orig} \
        > ${file_train}
    awk 'NF>0{OFS="\t";NF=10;$4=$5;$5=$5;print}NF==0{print}' ${file_test_orig} \
        > ${file_test}
    files_test[0]=${file_test}
fi

# Obtain a prediction file path for each test file.
for (( i=0; i<${#files_test[*]}; i++ ))
do
    file_test=${files_test[$i]}
    file_prediction=${file_test}.${suffix}.pred
    file_pruner_prediction=${file_test}.${suffix_pruner}.pred
    files_prediction[$i]=${file_prediction}
    files_pruner_prediction[$i]=${file_pruner_prediction}
done

################################################
# Train the pruner model.
################################################

if ${train_external_pruner}
then
    echo "Training pruner..."
    ${path_bin}/TurboParser \
        --train \
        --train_epochs=${num_epochs_pruner} \
        --file_model=${file_pruner_model} \
        --file_train=${file_train} \
        --model_type=basic \
        --labeled=false \
        --prune_basic=false \
        --only_supported_features \
        --form_case_sensitive=${case_sensitive} \
        --form_cutoff=${form_cutoff} \
        --lemma_cutoff=${lemma_cutoff} \
        --train_algorithm=${train_algorithm_pruner} \
        --train_regularization_constant=${regularization_parameter_pruner} \
        --large_feature_set=false \
        --logtostderr

    rm -f ${file_pruner_results}
    for (( i=0; i<${#files_test[*]}; i++ ))
    do
        file_test=${files_test[$i]}
        file_pruner_prediction=${files_pruner_prediction[$i]}

        echo ""
        echo "Testing pruner on ${file_test}..."
        ${path_bin}/TurboParser \
            --test \
            --evaluate \
            --file_model=${file_pruner_model} \
            --file_test=${file_test} \
            --file_prediction=${file_pruner_prediction} \
            --logtostderr

        echo ""
        echo "Evaluating pruner..."
        touch ${file_pruner_results}
        perl ${path_scripts}/eval.pl -b -q -g ${file_test} -s ${file_pruner_prediction} | tail -5 \
            >> ${file_pruner_results}
        cat ${file_pruner_results}
    done
fi

################################################
# Train the parser.
################################################

if $train
then
    if $train_external_pruner
    then
        # The pruner was already trained. Just set the external pruner
        # to the model that was obtained and train the parser.
        echo "Training..."
        ${path_bin}/TurboParser \
            --train \
            --train_epochs=${num_epochs} \
            --file_model=${file_model} \
            --file_train=${file_train} \
            --labeled=${labeled} \
            --prune_basic=${prune} \
            --pruner_posterior_threshold=${posterior_threshold} \
            --pruner_max_heads=${pruner_max_heads} \
            --use_pretrained_pruner \
            --file_pruner_model=${file_pruner_model} \
            --form_case_sensitive=${case_sensitive} \
            --form_cutoff=${form_cutoff} \
            --lemma_cutoff=${lemma_cutoff} \
            --train_algorithm=${train_algorithm} \
            --train_regularization_constant=${regularization_parameter} \
            --large_feature_set=${large_feature_set} \
            --model_type=${model_type} \
            --logtostderr
    else
        # Train a pruner along with the parser.
        ${path_bin}/TurboParser \
            --train \
            --train_epochs=${num_epochs} \
            --file_model=${file_model} \
            --file_train=${file_train} \
            --labeled=${labeled} \
            --form_case_sensitive=${case_sensitive} \
            --form_cutoff=${form_cutoff} \
            --lemma_cutoff=${lemma_cutoff} \
            --train_algorithm=${train_algorithm} \
            --train_regularization_constant=${regularization_parameter} \
            --large_feature_set=${large_feature_set} \
            --model_type=${model_type} \
            --prune_basic=${prune} \
            --pruner_posterior_threshold=${posterior_threshold} \
            --pruner_max_heads=${pruner_max_heads} \
            --pruner_train_epochs=${num_epochs_pruner} \
            --pruner_train_algorithm=${train_algorithm_pruner} \
            --pruner_train_regularization_constant=${regularization_parameter_pruner} \
            --logtostderr
    fi
fi

################################################
# Test the parser.
################################################

if $test
then
    rm -f ${file_results}
    for (( i=0; i<${#files_test[*]}; i++ ))
    do
        file_test=${files_test[$i]}
        file_prediction=${files_prediction[$i]}

        echo ""
        echo "Testing on ${file_test}..."
        ${path_bin}/TurboParser \
            --test \
            --evaluate \
            --file_model=${file_model} \
            --file_test=${file_test} \
            --file_prediction=${file_prediction} \
            --logtostderr

        echo ""
        echo "Evaluating..."
        touch ${file_results}
        perl ${path_scripts}/eval.pl -b -q -g ${file_test} -s ${file_prediction} | tail -5 \
            >> ${file_results}
        cat ${file_results}
    done
fi
ilantc/Turbo-parser
scripts/train_test_parser.sh
Shell
lgpl-3.0
9,564
#!/bin/sh
# vdemeester/obnam entrypoint

test -z "$DESTINATION" && DESTINATION="/obnam/destination"
test -z "$SOURCE" && SOURCE="/obnam/source"
test -z "$CONFIG" && CONFIG="/obnam/config"

OBNAM_OPTS=""

if test -n "$1"; then
    DESTINATION=$1
    shift
fi

if ! test "$(ls -A ${SOURCE})"; then
    echo "$SOURCE folder is empty, skipping backup"
    exit 1
fi

if test -r ${CONFIG}; then
    echo "Use ${CONFIG} as configuration"
    OBNAM_OPTS="${OBNAM_OPTS} --config=${CONFIG} $SOURCE"
else
    OBNAM_OPTS=" -r $DESTINATION $SOURCE"
fi

echo "Backup using ${OBNAM_OPTS}"
obnam backup $OBNAM_OPTS
vdemeester/dockerfiles
obnam/obnam-entrypoint.sh
Shell
unlicense
600
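A hypothetical container invocation (image name and host paths are my assumptions, not taken from the entry): the entrypoint expects source and destination mounted under /obnam.

docker run --rm \
    -v "$PWD/mydata:/obnam/source" \
    -v "$PWD/mybackups:/obnam/destination" \
    vdemeester/obnam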
#!/bin/bash

shopt -s extglob
rm !(*.c|*.sh|*.md|LICENSE)
skybon/timus
clean.sh
Shell
apache-2.0
57
#!/bin/bash
# make a backup of my git repos on github/gitlab/bitbucket/etc.

set -e # stops bash if any command fails

git config --global credential.helper "cache --timeout=3600"

DATE=`date '+%Y-%m-%d'`
mkdir -p gits-$DATE
cd gits-$DATE

function f_github() {
    git clone git@github.com:$1/$2.git # use SSH key
    tar czf github-$1-$2-$DATE.tar.gz $2
    rm -rf $2
}

function f_bitbucket() {
    #git clone https://[email protected]/$1/$2.git
    git clone git@bitbucket.org:$1/$2.git # use SSH key
    tar czf bitbucket-$1-$2-$DATE.tar.gz $2
    rm -rf $2
}

function f_gituliege() {
    git clone git@gitlab.uliege.be:$1/$2.git # use SSH key
    tar czf gituliege-${1//\//_}-$2-$DATE.tar.gz $2
    rm -rf $2
}

function f_blueberry() {
    git clone [email protected]:/home/metafor/GIT/$2.git
    tar czf blueberry-$1-$2-$DATE.tar.gz $2
    rm -rf $2
}

# github perso
f_github rboman rboman.github.io
f_github rboman progs
f_github rboman dgwaves
f_github rboman dg
f_github rboman gmshio
f_github rboman math0471
f_github rboman math0471.wiki
f_github rboman travis-cpp
f_github rboman femcode
f_github rboman fsi

# github a&m
f_github ulgltas CUPyDO
f_github ulgltas CUPyDO.wiki
f_github ulgltas waves
f_github ulgltas waves.wiki
f_github ulgltas VLM
f_github ulgltas linuxbin
f_github ulgltas modali
f_github ulgltas NativeSolid
f_github ulgltas SU2
f_github ulgltas Trilinos
f_github ulgltas ceci
f_github ulgltas plotter2d
f_github ulgltas Trusses

# github math0471
f_github math0471 dg_shallow
f_github math0471 dg_maxwell
f_github math0471 dg_acoustics
f_github math0471 sph
f_github math0471 fe2
f_github math0471 fdtd_brain
f_github math0471 fdtd_oven

# others
#f_github mlucio89 CUPyDO
#f_github mlucio89 PFEM
#f_github mlucio89 Trusses

# gitlab uliege
f_gituliege R.Boman lamtools
f_gituliege R.Boman lamtools.wiki
f_gituliege R.Boman lam3_postpro
f_gituliege R.Boman lam3_xmesher
f_gituliege R.Boman lam3_user
f_gituliege R.Boman lam3
f_gituliege R.Boman lam3_chaining
f_gituliege R.Boman ceci_copy
f_gituliege R.Boman idm
f_gituliege R.Boman mogador
f_gituliege R.Boman CT
f_gituliege R.Boman math0024
f_gituliege R.Boman math0471_latex
f_gituliege R.Boman code
f_gituliege R.Boman monprojet
f_gituliege R.Boman svnproj_trunk
f_gituliege am-dept LieGroup
f_gituliege am-dept PFEM
f_gituliege am-dept PFEM.wiki
f_gituliege am-dept/MN2L oo_meta
f_gituliege am-dept/MN2L oo_nda
f_gituliege am-dept/MN2L MetaLubSetup
f_gituliege am-dept/MN2L MetaLub
f_gituliege am-dept/MN2L keygen
f_gituliege am-dept/MN2L MetaforSetup
f_gituliege am-dept/MN2L mumps-4.10.0
f_gituliege am-dept/MN2L mumps-5.1.2
f_gituliege am-dept/MN2L tetgen-1.4.3
f_gituliege am-dept/MN2L triangle-1.6
f_gituliege am-dept/MN2L LagamineAPI
f_gituliege am-dept/MN2L MetaforF
f_gituliege am-dept/MN2L parasolid
f_gituliege UEE Lagamine
rboman/progs
sandbox/bash/bck_rep_git.sh
Shell
apache-2.0
2,851
pkg_origin=core
pkg_name=ant
pkg_version=1.10.1
pkg_maintainer='The Habitat Maintainers <humans@habitat.sh>'
pkg_license=('Apache-2.0')
pkg_source=https://github.com/apache/ant/archive/rel/$pkg_version.tar.gz
pkg_shasum=8bb3211dd7c849cfd61fb7573c8054909bddbc0f35ec5bed846cde6b132b11a6
pkg_deps=(lilian/coreutils lilian/jdk8)
pkg_build_deps=(lilian/jdk8)
pkg_bin_dirs=(bin)
pkg_include_dirs=(include)
pkg_lib_dirs=(lib)

do_build() {
  export JAVA_HOME=$(hab pkg path lilian/jdk8)
  pushd $HAB_CACHE_SRC_PATH/$pkg_name-rel-$pkg_version
  sh ./build.sh -Ddist.dir=$pkg_prefix dist
}

do_install() {
  return 0
}
be-plans/be
ant/plan.sh
Shell
apache-2.0
610
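A hedged usage sketch: Habitat plans like this one are normally built from inside a studio. The commands below are the standard hab workflow, not taken from this entry.

hab studio enter   # drop into a clean build environment
build ant          # run this plan; the .hart package lands in ./results/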
#!/bin/bash

YARA=3.5.0
GEOIP=1.6.0
MOLOCH=0.17.1

# Usage and Input Arguments
if [ -n "$3" ]; then
    export TOOLCHAINDIR=$1
    export TARGETDIR=$2
    export FILESDIR=$3
    export PREFIX=/usr
    export TDIR=/root/moloch
else
    echo "Usage: openwrt-moloch /full/path/to/your/openwrt/toolchain /full/path/to/your/openwrt/target /full/path/to/your/openwrt/install"
    echo "Please provide the FULL path for the following three directories as input arguments, in the same order: "
    echo "  1) Toolchain Directory for Compile (ex: ~/openwrt/staging/toolchain_...), and"
    echo "  2) Target Directory for Compile Dependencies (ex: ~/openwrt/staging/target_...), and"
    echo "  3) Destination Directory for Install (ex: ~/openwrt/files/)"
    exit 1
fi

export STAGING_DIR="${TOOLCHAINDIR}/.."
export BASEDIR=$PWD
mkdir -p working-dir
cd working-dir

# Tools
export CFLAGS="-I${TARGETDIR}/usr/include"
export CPPFLAGS="-I${TARGETDIR}/usr/include"
export CXXFLAGS="-I${TARGETDIR}/usr/include"
export LDFLAGS="-L${TARGETDIR}/usr/lib"
export PKG_CONFIG_PATH="${TARGETDIR}/usr/lib/pkgconfig"
export AR=$TOOLCHAINDIR/bin/arm-openwrt-linux-ar
export AS=$TOOLCHAINDIR/bin/arm-openwrt-linux-as
export LD=$TOOLCHAINDIR/bin/arm-openwrt-linux-ld
export NM=$TOOLCHAINDIR/bin/arm-openwrt-linux-nm
export CC=$TOOLCHAINDIR/bin/arm-openwrt-linux-gcc
export CPP=$TOOLCHAINDIR/bin/arm-openwrt-linux-cpp
export GCC=$TOOLCHAINDIR/bin/arm-openwrt-linux-gcc
export CXX=$TOOLCHAINDIR/bin/arm-openwrt-linux-g++
export LINK=$TOOLCHAINDIR/bin/arm-openwrt-linux-g++
export RANLIB=$TOOLCHAINDIR/bin/arm-openwrt-linux-ranlib
export MOLOCHUSER=daemon
export GROUPNAME=daemon
export PASSWORD=0mgMolochRules1
export INTERFACE=eth0
export BATCHRUN=yes

# Get Source Code, Configure, Build and Install Yara
rm -rf yara-$YARA.tar.gz
rm -rf yara-$YARA/
wget https://github.com/VirusTotal/yara/archive/v$YARA.tar.gz
tar zxf v$YARA.tar.gz
cd yara-$YARA/
./bootstrap.sh
./configure --host=arm-openwrt-linux \
    --prefix=$TARGETDIR/$PREFIX/ \
    --exec_prefix=$TARGETDIR/$PREFIX
make
make install
cp -a $TARGETDIR/usr/lib/libyara.so* $FILESDIR/$PREFIX/lib
cd ..

# Get Source Code, Configure, Build and Install GeoIP
rm -rf GeoIP-$GEOIP.tar.gz
rm -rf GeoIP-$GEOIP/
wget http://www.maxmind.com/download/geoip/api/c/GeoIP-$GEOIP.tar.gz
tar zxf GeoIP-$GEOIP.tar.gz
cd GeoIP-$GEOIP/
export ac_cv_func_malloc_0_nonnull=yes
export ac_cv_func_realloc_0_nonnull=yes
./configure --host=arm-openwrt-linux \
    --prefix=$TARGETDIR/$PREFIX/ \
    --exec_prefix=$TARGETDIR/$PREFIX/
make
make install
cp -a $TARGETDIR/usr/lib/libGeoIP.so* $FILESDIR/$PREFIX/lib
cd ..

# Get Source Code, Configure, Build and Install Moloch
rm -rf v$MOLOCH.tar.gz
rm -rf moloch-$MOLOCH/
wget https://github.com/aol/moloch/archive/v$MOLOCH.tar.gz
tar zxf v$MOLOCH.tar.gz
cd moloch-$MOLOCH
patch -p1 < $BASEDIR/moloch-$MOLOCH-patch
export LIBS="-lz"
./configure --host=arm-openwrt-linux \
    --prefix=$TARGETDIR/$PREFIX/ \
    --exec_prefix=$TARGETDIR/$PREFIX/
make

# Install...
rm -rf $FILESDIR/$TDIR

# Moloch-Capture Binary
mkdir -p $FILESDIR/$TDIR/bin
cp capture/moloch-capture $FILESDIR/$TDIR/bin/

# Moloch-Viewer
mkdir -p $FILESDIR/$TDIR/viewer
cp -a viewer/* $FILESDIR/$TDIR/viewer/

# Moloch raw capture directory
mkdir -p $FILESDIR/$TDIR/raw

# Install Moloch-Capture Parsers
mkdir -p $FILESDIR/$TDIR/parsers/
cp capture/parsers/*.so $FILESDIR/$TDIR/parsers/
cp capture/parsers/*.jade $FILESDIR/$TDIR/parsers/

# Moloch Configuration Templates
mkdir -p $FILESDIR/$TDIR/etc
cp single-host/etc/* $FILESDIR/$TDIR/etc
cat single-host/etc/elasticsearch.yml | sed -e "s,_TDIR_,${TDIR},g" > $FILESDIR/$TDIR/etc/elasticsearch.yml
./easybutton-config.sh "$FILESDIR/$TDIR"
cat $FILESDIR/${TDIR}/etc/config.ini.template | sed -e 's/_PASSWORD_/'${PASSWORD}'/g' -e 's/_USERNAME_/'${MOLOCHUSER}'/g' -e 's/_GROUPNAME_/'${GROUPNAME}'/g' -e 's/_INTERFACE_/'${INTERFACE}'/g' -e "s,_TDIR_,${TDIR},g" > $FILESDIR/${TDIR}/etc/config.ini
wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz
gunzip -c GeoIP.dat.gz > $FILESDIR/$TDIR/etc/GeoIP.dat
wget http://www.maxmind.com/download/geoip/database/asnum/GeoIPASNum.dat.gz
gunzip -c GeoIPASNum.dat.gz > $FILESDIR/$TDIR/etc/GeoIPASNum.dat
wget -O $FILESDIR/$TDIR/etc/ipv4-address-space.csv https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.csv
cd ..

# Get Source Code, Configure, Build and Install fs-ext
NODEFSEXT=0.5.0
rm -rf v$NODEFSEXT.tar.gz
rm -rf node-fs-ext-$NODEFSEXT/
wget https://github.com/baudehlo/node-fs-ext/archive/v$NODEFSEXT.tar.gz
tar zxf v$NODEFSEXT.tar.gz
cd node-fs-ext-0.5.0/
#npm --arch=arm --target=v4.6.0 install
rm -rf $FILESDIR/$TDIR/viewer/node_modules/fs-ext
mkdir -p $FILESDIR/$TDIR/viewer/node_modules/fs-ext
cp -a * $FILESDIR/$TDIR/viewer/node_modules/fs-ext/
cd ..

# Get Source Code, Configure, Build and Install png
NODEPNG=3.0.3
rm -rf $NODEPNG.tar.gz
rm -rf node-png-$NODEPNG/
wget https://github.com/pkrumins/node-png/archive/$NODEPNG.tar.gz
tar zxf $NODEPNG.tar.gz
cd node-png-$NODEPNG/
#unzip -o node-png-node-v12.zip
#cd node-png-node-v12/
#npm --arch=arm --target=v4.6.0 install
rm -rf $FILESDIR/$TDIR/viewer/node_modules/png
mkdir -p $FILESDIR/$TDIR/viewer/node_modules/png
cp -a * $FILESDIR/$TDIR/viewer/node_modules/png/
cd ..

cd $BASEDIR
exit 0
prywesee/easybuttons
openwrt-moloch.sh
Shell
apache-2.0
5226
#!/bin/bash

# http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
IFS=$'\n\t'

test_file="./ghe-backup-test/region-replacement/convert-kms-private-ssh-key.sh"

../replace-convert-properties.sh "###REGION###" "eu-west-1" $test_file

if grep -Fxq "###REGION###" "$test_file"
then
    exit 1 # NO success
else
    exit 0 # success
fi
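
# --- hedged note (added; not part of the original test) ---
# In the check above, grep -F takes the pattern as a fixed string, -x requires
# a whole-line match, and -q suppresses output so only the exit status is
# inspected, e.g.:
#   printf '###REGION###\n' | grep -Fxq '###REGION###' && echo "placeholder still present"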
zalando/ghe-backup
bashtest/test-replace-convert-properties.sh
Shell
apache-2.0
356
#!/bin/sh
set -ex

. infra/install_rust.sh

cargo build
cargo test
notriddle/aelita
infra/build_dynamic.sh
Shell
apache-2.0
66
#!/usr/bin/env bash
. service.sh

install_app

if [ $? -eq 0 ]; then
    start_jetty
fi
stevenshlin/ismming
api/startup.sh
Shell
apache-2.0
87
#!/bin/bash

function STOUT(){
    $@ 1>/tmp/standard_out 2>/tmp/error_out
    if [ "$?" -eq "0" ]
    then
        cat /tmp/standard_out | awk '{print "[COUT]", $0}'
        cat /tmp/error_out | awk '{print "[COUT]", $0}' >&2
        return 0
    else
        cat /tmp/standard_out | awk '{print "[COUT]", $0}'
        cat /tmp/error_out | awk '{print "[COUT]", $0}' >&2
        return 1
    fi
}

function STOUT2(){
    $@ 1>/dev/null 2>/tmp/error_out
    if [ "$?" -eq "0" ]
    then
        cat /tmp/error_out | grep ERROR | awk '{print "[COUT]", $0}' >&2
        return 0
    else
        cat /tmp/error_out | grep ERROR | awk '{print "[COUT]", $0}' >&2
        return 1
    fi
}

declare -A map=(
    ["git-url"]=""
    ["out-put-type"]=""
    ["report-path"]=""
    ["version"]=""
)

data=$(echo $CO_DATA | awk '{print}')
for i in ${data[@]}
do
    temp=$(echo $i | awk -F '=' '{print $1}')
    value=$(echo $i | awk -F '=' '{print $2}')
    for key in ${!map[@]}
    do
        if [ "$temp" = "$key" ]
        then
            map[$key]=$value
        fi
    done
done

if [ "" = "${map["git-url"]}" ]
then
    printf "[COUT] Handle input error: %s\n" "git-url"
    printf "[COUT] CO_RESULT = %s\n" "false"
    exit
fi

if [[ "${map["out-put-type"]}" =~ ^(xml|json|yaml)$ ]]
then
    printf "[COUT] out-put-type: %s\n" "${map["out-put-type"]}" 1>/dev/null
else
    printf "[COUT] Handle input error: %s\n" "out-put-type should be one of xml,json,yaml"
    printf "[COUT] CO_RESULT = %s\n" "false"
    exit
fi

if [ "${map["version"]}" = "gradle3" ]
then
    gradle_version=$gradle3/gradle
elif [ "${map["version"]}" = "gradle4" ]
then
    gradle_version=$gradle4/gradle
else
    printf "[COUT] Handle input error: %s\n" "version should be one of gradle3,gradle4"
    printf "[COUT] CO_RESULT = %s\n" "false"
    exit
fi

if [ "" = "${map["report-path"]}" ]
then
    map["report-path"]="build/reports/pmd"
fi

STOUT git clone ${map["git-url"]}
if [ "$?" -ne "0" ]
then
    printf "[COUT] CO_RESULT = %s\n" "false"
    exit
fi

pdir=`echo ${map["git-url"]} | awk -F '/' '{print $NF}' | awk -F '.' '{print $1}'`
cd ./$pdir

if [ ! -f "build.gradle" ]
then
    printf "[COUT] CO_RESULT = file build.gradle not found! \n"
    printf "[COUT] CO_RESULT = %s\n" "false"
    exit
fi

# run the task listing (the original echoed the command instead of running it)
havepmd=$($gradle_version -q tasks --all | grep pmd)
if [ "$havepmd" = "" ]
then
    cat /root/pmd.conf >> build.gradle
fi

STOUT2 $gradle_version pmdMain
STOUT2 $gradle_version pmdTest

if [ "${map["out-put-type"]}" = "xml" ]
then
    cat ${map["report-path"]}/main.xml
    cat ${map["report-path"]}/test.xml
else
    java -jar /root/convert.jar ${map["report-path"]}/main.xml ${map["out-put-type"]}
    java -jar /root/convert.jar ${map["report-path"]}/test.xml ${map["out-put-type"]}
fi

printf "\n[COUT] CO_RESULT = %s\n" "true"
exit
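
# --- hedged usage sketch (added; not part of the original component) ---
# STOUT runs its arguments, prefixes the captured stdout/stderr with "[COUT]"
# so the pipeline collector can parse it, and preserves the exit status, e.g.:
#   STOUT git --version               # prints "[COUT] git version ..."
#   STOUT false || echo "[COUT] command failed"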
Lupino/containerops
component/java/analysis/pmd/compile.sh
Shell
apache-2.0
2811
#!/bin/bash

function cd_and_cmd() {
    local my_cmd="pwd"
    cd /tmp || return 1
    eval $my_cmd || return 1
    return 0
}

function my_cd() {
    local my_cmd="echo 'running pwd:';pwd"
    eval $my_cmd || return 1
    cd /tmp || return 1
    return 0
}

PWD=$(readlink -f $(dirname $0))
echo "PWD $PWD"
pwd
start_dir="$PWD"
echo "start_dir=$start_dir"
echo

(my_cd; echo "in subshell"; echo "PWD $PWD"; pwd; echo)
echo "back in parent shell"
echo "PWD $PWD"
pwd
echo "start_dir=$start_dir"
echo

my_cd
echo "PWD $PWD"
pwd
echo "start_dir=$start_dir"
echo
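
# --- hedged note (added): expected behavior of the demo above ---
# The ( my_cd; ... ) group runs in a subshell, so its `cd /tmp` does not move
# the parent shell; the bare `my_cd` call afterwards does. Because $PWD is
# manually overwritten at the top, it can disagree with what `pwd` reports
# until the first `cd` resets it.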
loafdog/loaf-src
scripts/bash-scripts/test-cd.sh
Shell
apache-2.0
565
#!/bin/bash -x

# Local provisioning mode (host is Ansible controller)
# ("$1" must be quoted and "~" does not expand inside quotes, hence $HOME)
if [ "$1" = "--local" ]; then

    if [ ! -d "$HOME/the-app" ]; then
        git clone https://github.com/devops-dojo/the-app.git ~/the-app
    fi

    cd ~/the-app

    # Force reset local GIT repo to match Github
    git fetch --all
    git reset --hard origin/master

    # Exclude global variables from rsync, but create if it doesn't exist
    if [ ! -f "/provision/vars/default.yml" ]; then
        cp vagrant/provision/vars/default.yml /provision/vars/default.yml
    fi

    # Stick to branch in vars/default.yml
    github_branch=`awk '/github_branch/ {printf "%s",$2;exit}' /provision/vars/default.yml`

    # Clean local tags if they changed on Github
    if [ "$github_branch" != "master" ]; then
        github_tag=`echo ${github_branch} | awk -F "/" '/tags/ {print $3}'`
        git tag -d ${github_tag}
        git fetch origin --tags
    fi

    git checkout ${github_branch}

    sudo rsync -aHAXvh --update --exclude 'vagrant/provision/vars/default.yml' vagrant/provision /

    shift
fi

# This directory is synced by vagrant or copied with above code
cd /provision
ls -alrt

echo ${http_proxy}

# Are we behind web proxy?
if grep -Fxq "with_proxy: true" vars/default.yml; then
    proxy_host=`awk '/http_proxy_host/ {printf "%s",$2;exit}' vars/default.yml`
    proxy_port=`awk '/http_proxy_port/ {printf "%s",$2;exit}' vars/default.yml`
    export http_proxy=http://${proxy_host}:${proxy_port}
    export https_proxy=http://${proxy_host}:${proxy_port}
fi

# Install or update ansible if not there
export DEBIAN_FRONTEND=noninteractive
if ! command -v ansible >/dev/null 2>&1; then
    sudo -E apt-get update
    sudo -E apt-get install -y software-properties-common python-apt aptitude
    sudo -E apt-key add ansible.key.txt
    sudo -E apt-add-repository --yes --update ppa:ansible/ansible
    sudo -E apt-get install -y ansible
fi

echo "RUNNING ansible-playbook -c local --inventory-file=hosts --extra-vars='ansible_ssh_user=vagrant' --user=vagrant " $@
ansible-playbook -c local --inventory-file=hosts --extra-vars='ansible_ssh_user=vagrant' --user=vagrant "$@"
devops-dojo/the-app
vagrant/scripts/provision.sh
Shell
apache-2.0
2077
#!/bin/bash
##-----------------------------------------------------------------------------
## Title        : my_can
## Project      : SeeGrip
## Library:     :
##-----------------------------------------------------------------------------
## File name    : my_can.sh
## Author       : hhanff
## Company      : DFKI
## Last update  : 2011-11-19
## Platform     : lin
##-----------------------------------------------------------------------------
## Purpose      : Provides a means to automatically set up your system
##                to connect to the CAN/USB adapter
##
##-----------------------------------------------------------------------------
## Assumptions  : If your kernel version is not 2.6.35-25 you have to
##                re-compile socketcan and iproute2 for your machine.
##                This process is also described on:
##                https://svn.hb.dfki.de/SeeGrip-Trac/wiki/CAN
##                Brief description of this process:
##                > cd socketcan-usb/socketcan-usb2-patchV3/trunk/kernel/2.6
##                If necessary, first replace the following functions in the
##                files esd_usb2.c and ems_usb.c:
##                  usb_buffer_alloc() is renamed to usb_alloc_coherent()
##                  usb_buffer_free() is renamed to usb_free_coherent()
##                > make
##                > mkdir /lib/modules/$(uname -r)/socketcan
##                > find -name \*.ko | xargs install -t /lib/modules/$(uname -r)/socketcan
##                > depmod $(uname -r)
##
##                > cd socketcan-usb/iproute2
##                > make
##                > make install
## Limitations  :
## Known Errors :
##-----------------------------------------------------------------------------
## Revisions    :
## Date        Version  Author          Description
## 2011-12-19  1.0      Hendrik Hanff   Created
##-----------------------------------------------------------------------------

# error on first failed command or referencing an undefined variable:
#set -eu
#set -x

echo " " >> /tmp/can-usb

# Would be cool if somebody comes up with a method of testing if these
# modules are actually loaded and only remove them if they are loaded...
date >> /tmp/can-usb
echo "Closing link..." >> /tmp/can-usb
sudo /sbin/ip link set can0 down

echo "Unloading kernel modules..." >> /tmp/can-usb
if sudo grep "esd_usb2" /proc/modules; then
    sudo rmmod esd_usb2
fi
if grep "ems_usb" /proc/modules; then
    sudo rmmod ems_usb
fi
if grep "can_dev" /proc/modules; then
    sudo rmmod can_dev
fi
if grep "can_raw" /proc/modules; then
    sudo rmmod can_raw
fi
if grep "can" /proc/modules; then
    sudo rmmod can
fi

echo "Loading kernel modules..." >> /tmp/can-usb
sudo insmod /lib/modules/2.6.35-25-generic/socketcan/can.ko
sudo insmod /lib/modules/2.6.35-25-generic/socketcan/can-raw.ko
sudo insmod /lib/modules/2.6.35-25-generic/socketcan/can-dev.ko
sudo insmod /lib/modules/2.6.35-25-generic/socketcan/ems_usb.ko
sudo insmod /lib/modules/2.6.35-25-generic/socketcan/esd_usb2.ko
echo "Drivers loaded" >> /tmp/can-usb

echo "Please DISconnect the USB to CAN converter from your PC and hit <Enter>"
read TRASH;
echo "Please connect the USB to CAN converter to your PC and hit <Enter>"
read TRASH;
/bin/sleep 3

echo "Starting link..." >> /tmp/can-usb
sudo /sbin/ip link set can0 up type can bitrate 1000000 restart-ms 1000

echo "Checking if device is registered..." >> /tmp/can-usb
# Test if can0 was really registered.
sudo tail -n 2 /var/log/messages | grep "can0 registered" >> /tmp/can-usb

exit 0
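
# --- hedged sketch (added; not part of the original script) ---
# The insmod sequence above could equally be driven from one ordered list,
# assuming the same 2.6.35-25-generic module directory:
#   for m in can can-raw can-dev ems_usb esd_usb2; do
#       sudo insmod /lib/modules/2.6.35-25-generic/socketcan/$m.ko
#   done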
hhanff/software
bash/my_can.sh
Shell
apache-2.0
3538
#!/bin/bash -e

# define location of openssl binary manually since running this
# script under Vagrant fails on some systems without it
OPENSSL=/usr/bin/openssl

function usage {
    echo "USAGE: $0 <output-dir> <cert-base-name> <CN> [SAN,SAN,SAN]"
    echo "  example: $0 ./ssl/ worker kube-worker IP.1=127.0.0.1,IP.2=10.0.0.1"
}

if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
    usage
    exit 1
fi

OUTDIR="$1"
CERTBASE="$2"
CN="$3"
SANS="$4"

if [ ! -d $OUTDIR ]; then
    echo "ERROR: output directory does not exist: $OUTDIR"
    exit 1
fi

OUTFILE="$OUTDIR/$CN.tar"

if [ -f "$OUTFILE" ];then
    exit 0
fi

CNF_TEMPLATE="
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names

[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
"

echo "Generating SSL artifacts in $OUTDIR"

CONFIGFILE="$OUTDIR/$CERTBASE-req.cnf"
CAFILE="$OUTDIR/ca.pem"
CAKEYFILE="$OUTDIR/ca-key.pem"
KEYFILE="$OUTDIR/$CERTBASE-key.pem"
CSRFILE="$OUTDIR/$CERTBASE.csr"
PEMFILE="$OUTDIR/$CERTBASE.pem"

CONTENTS="${CAFILE} ${KEYFILE} ${PEMFILE}"

# Add SANs to openssl config
echo "$CNF_TEMPLATE$(echo $SANS | tr ',' '\n')" > "$CONFIGFILE"

$OPENSSL genrsa -out "$KEYFILE" 2048
$OPENSSL req -new -key "$KEYFILE" -out "$CSRFILE" -subj "/CN=$CN" -config "$CONFIGFILE"
$OPENSSL x509 -req -in "$CSRFILE" -CA "$CAFILE" -CAkey "$CAKEYFILE" -CAcreateserial -out "$PEMFILE" -days 365 -extensions v3_req -extfile "$CONFIGFILE"

tar -cf $OUTFILE -C $OUTDIR $(for f in $CONTENTS;do printf "$(basename $f) ";done)

echo "Bundled SSL artifacts into $OUTFILE"
echo "$CONTENTS"
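
# --- hedged usage sketch (added; not part of the original script) ---
# Example invocation matching the usage text, plus a quick inspection of the
# issued certificate (ca.pem/ca-key.pem are assumed to already exist in ./ssl):
#   ./init-ssl.sh ./ssl worker kube-worker IP.1=127.0.0.1,IP.2=10.0.0.1
#   openssl x509 -in ssl/worker.pem -noout -subject -dates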
ChinaCloud/ansible-kubernetes
roles/k8s-ssh/files/init-ssl.sh
Shell
apache-2.0
1802
#!/bin/bash
#
# Install the Windows packaging tools on Ubuntu
#

set -e

BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"

USE_PREBUILT=true

source $BASE_DIR/scripts/common.sh

# Install mono if necessary
if [ ! -f /usr/bin/nuget ]; then
    sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
    echo "deb http://download.mono-project.com/repo/debian wheezy/snapshots/3.12.0 main" | sudo tee /etc/apt/sources.list.d/mono-xamarin.list
    sudo apt-get update
    sudo apt-get install -qq mono-devel mono-gmcs nuget
fi

# Ensure that some directories exist
mkdir -p $DOWNLOAD_DIR
mkdir -p $DEPS_DIR

# Download Chocolatey
if $USE_PREBUILT; then
    if [ ! -e $DOWNLOAD_DIR/choco-${CHOCO_VERSION}-binaries.tar.gz ]; then
        # Download a prebuilt version
        cd $DOWNLOAD_DIR
        curl -LO http://files.troodon-software.com/choco/choco-${CHOCO_VERSION}-binaries.tar.gz
        cd $BASE_DIR
    fi
else
    if [ ! -e $DOWNLOAD_DIR/choco-$CHOCO_VERSION.tar.gz ]; then
        # Download the source
        curl -L -o $DOWNLOAD_DIR/choco-$CHOCO_VERSION.tar.gz https://github.com/chocolatey/choco/archive/$CHOCO_VERSION.tar.gz
    fi
fi

# Build Chocolatey if not already built
if [ ! -e $DEPS_INSTALL_DIR/chocolatey/console/choco.exe ]; then
    if $USE_PREBUILT; then
        tar xf $DOWNLOAD_DIR/choco-${CHOCO_VERSION}-binaries.tar.gz -C $DEPS_INSTALL_DIR
    else
        cd $DEPS_DIR
        rm -fr choco-*
        tar xf $DOWNLOAD_DIR/choco-$CHOCO_VERSION.tar.gz
        cd choco-$CHOCO_VERSION
        nuget restore src/chocolatey.sln
        chmod +x build.sh
        ./build.sh -v
        cp -Rf code_drop/chocolatey $DEPS_INSTALL_DIR/
    fi
fi
michaelkschmidt/fwup
scripts/ubuntu_install_chocolatey.sh
Shell
apache-2.0
1734
#!/usr/bin/env bash

set -euo pipefail
IFS=$'\n\t'

export TLS_ENABLED=true
export TLS_KEYSTORE=/tmp/keystore

mvn clean install
rapidoid/rapidoid
bin/test-tls.sh
Shell
apache-2.0
128
# Monthly turnover: retail monthly turnover
curl http://admin.gzlerong.com/index.php/Admin/Monitor/stream/key/3fa283d936cc83a698bfa14e94eced9b/type/month >> /home/bash.log;

# Agent monthly report
curl http://admin.gzlerong.com/index.php/Admin/Monitor/agentstream/key/3fa283d936cc83a698bfa14e94eced9b/type/month >> /home/bash.log;
jelly074100209/gg_admin
Monitor/crontab_per_month.sh
Shell
apache-2.0
306
ulimit -n 5200

export PROV_PATH="./prov-files"
export RUN_ID=$(date | md5 | awk '{print $1}')

#mpiexec.hydra python -m dispel4py.new.processor mpi test.rtxcorr.rtxcorr3 -f $RUN_PATH/input
python -m dispel4py.new.processor multi test.rtxcorr.xcorr_parallel -n 50 -f xcrr-input
#python -m dispel4py.new.processor simple test.rtxcorr.rtxcor_rays -f xcrr-input
aspinuso/dispel4py
launch-rtxcor-parallel.sh
Shell
apache-2.0
359
#!/bin/sh

eval $(aws --region us-east-1 ecr get-login --no-include-email)
docker build --no-cache -t 326027360148.dkr.ecr.us-east-1.amazonaws.com/airflow:latest .
docker push 326027360148.dkr.ecr.us-east-1.amazonaws.com/airflow:latest
gilt/incubator-airflow
airflow/hbc_deployment/scripts/deployment/ecs/build_push_image.sh
Shell
apache-2.0
236
#!/usr/bin/env bash

for SAMPLE in `bcftools query -l $1`
do
    echo "Handling ${SAMPLE}..."
    bcftools view --exclude-uncalled --exclude-types ref -s ${SAMPLE} -O v $1 > $2.${SAMPLE}.vcf
done
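
# --- hedged usage sketch (added; not part of the original script) ---
# $1 is a multi-sample VCF/BCF, $2 an output prefix; one uncompressed,
# single-sample VCF is written per sample, e.g.:
#   ./split_vcf_by_samples_bcftools.sh cohort.vcf.gz out/cohort
#   # -> out/cohort.SAMPLE_A.vcf, out/cohort.SAMPLE_B.vcf, ...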
mahajrod/MACE
scripts/split_vcf_by_samples_bcftools.sh
Shell
apache-2.0
202
#!/system/bin/sh
# Copyright (c) 2012, Code Aurora Forum. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of Code Aurora nor
#       the names of its contributors may be used to endorse or promote
#       products derived from this software without specific prior written
#       permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#
# start ril-daemon only for targets on which radio is present
#
baseband=`getprop ro.baseband`
multirild=`getprop ro.multi.rild`
dsds=`getprop persist.dsds.enabled`
netmgr=`getprop ro.use_data_netmgrd`

case "$baseband" in
    "apq")
        setprop ro.radio.noril yes
        stop ril-daemon
esac

case "$baseband" in
    "msm" | "csfb" | "svlte2a" | "mdm" | "sglte" | "unknown")
        start qmuxd
        case "$baseband" in
            "svlte2a" | "csfb")
                start qmiproxy
        esac
        case "$multirild" in
            "true")
                case "$dsds" in
                    "true")
                        start ril-daemon1
                esac
        esac
        case "$netmgr" in
            "true")
                start netmgrd
        esac
esac

#
# enable bluetooth profiles dynamically
#
case $baseband in
    "apq")
        setprop ro.qualcomm.bluetooth.opp true
        setprop ro.qualcomm.bluetooth.hfp false
        setprop ro.qualcomm.bluetooth.hsp false
        setprop ro.qualcomm.bluetooth.pbap true
        setprop ro.qualcomm.bluetooth.ftp true
        setprop ro.qualcomm.bluetooth.map true
        setprop ro.qualcomm.bluetooth.nap false
        setprop ro.qualcomm.bluetooth.sap false
        setprop ro.qualcomm.bluetooth.dun false
        ;;
    "mdm" | "svlte2a" | "svlte1" | "csfb")
        setprop ro.qualcomm.bluetooth.opp true
        setprop ro.qualcomm.bluetooth.hfp true
        setprop ro.qualcomm.bluetooth.hsp true
        setprop ro.qualcomm.bluetooth.pbap true
        setprop ro.qualcomm.bluetooth.ftp true
        setprop ro.qualcomm.bluetooth.map true
        setprop ro.qualcomm.bluetooth.nap true
        setprop ro.qualcomm.bluetooth.sap true
        setprop ro.qualcomm.bluetooth.dun false
        ;;
    "msm")
        setprop ro.qualcomm.bluetooth.opp true
        setprop ro.qualcomm.bluetooth.hfp true
        setprop ro.qualcomm.bluetooth.hsp true
        setprop ro.qualcomm.bluetooth.pbap true
        setprop ro.qualcomm.bluetooth.ftp true
        setprop ro.qualcomm.bluetooth.map true
        setprop ro.qualcomm.bluetooth.nap true
        setprop ro.qualcomm.bluetooth.sap true
        setprop ro.qualcomm.bluetooth.dun true
        ;;
    "mpq")
        setprop ro.qualcomm.bluetooth.opp false
        setprop ro.qualcomm.bluetooth.hfp false
        setprop ro.qualcomm.bluetooth.hsp false
        setprop ro.qualcomm.bluetooth.pbap false
        setprop ro.qualcomm.bluetooth.ftp false
        setprop ro.qualcomm.bluetooth.map false
        setprop ro.qualcomm.bluetooth.nap false
        setprop ro.qualcomm.bluetooth.sap false
        setprop ro.qualcomm.bluetooth.dun false
        ;;
    *)
        setprop ro.qualcomm.bluetooth.opp true
        setprop ro.qualcomm.bluetooth.hfp true
        setprop ro.qualcomm.bluetooth.hsp true
        setprop ro.qualcomm.bluetooth.pbap true
        setprop ro.qualcomm.bluetooth.ftp true
        setprop ro.qualcomm.bluetooth.map true
        setprop ro.qualcomm.bluetooth.nap true
        setprop ro.qualcomm.bluetooth.sap true
        setprop ro.qualcomm.bluetooth.dun true
        ;;
esac
alexforsale/android_device_hisense_msm7x27a-common
rootdir/init.qcom.class_main.sh
Shell
apache-2.0
4529
#!/usr/bin/env bash

echo Check command line parameter

. $(pwd)/wikijournals.conf

echo html directory: $HTMLDIR
echo wikijournals directory: $WIKIDIR
echo dbuser: $DBUSER
echo dbpass: $DBPASS
echo dbserver: $DBSERVER
echo dbname: $DBNAME
echo wikiuser: $WIKIUSER
echo wikipwd: $WIKIPWD
echo wikiname: $WIKINAME
echo Mediawiki Main Version: $MEDIAWIKIVERSION
echo Mediawiki Minor Version: $MEDIAWIKIMINORVERSION

echo remove wiki directory
rm -rf $HTMLDIR/$WIKIDIR

echo remove wiki tables from database
MYSQL=$(which mysql)
AWK=$(which awk)
GREP=$(which grep)

TABLES=$($MYSQL -u $DBUSER -p$DBPASS $DBNAME -e 'show tables' | $AWK '{ print $1}' | $GREP -v '^Tables' )

for t in $TABLES
do
    echo "Deleting $t table from $DBNAME database..."
    $MYSQL -u $DBUSER -p$DBPASS $DBNAME -e "drop table $t"
done
swertschak/wikijournals
install/linux/rollback_installation.sh
Shell
apache-2.0
803
set -x
set -e

export HDFS_ROOT=$1

# Build JAR
./gradlew clean jar

# Create deploy dir at build/celos-deploy
export DEPLOY_DIR=build/celos-deploy
rm -rf $DEPLOY_DIR
mkdir -p $DEPLOY_DIR/hdfs/lib

# Copy files into deploy dir
cp src/main/celos/* $DEPLOY_DIR/
cp src/main/oozie/* $DEPLOY_DIR/hdfs/
cp build/libs/* $DEPLOY_DIR/hdfs/lib/

# Run Celos CI
java -jar ../../celos-ci/build/libs/celos-ci-fat.jar --workflowName wordcount --mode deploy --deployDir $DEPLOY_DIR --target $TARGET_FILE --hdfsRoot $HDFS_ROOT
collectivemedia/celos
samples/quickstart/ci/deploy.sh
Shell
apache-2.0
511
#!/bin/sh

DOCKER_DIR=/open-falcon
of_bin=$DOCKER_DIR/open-falcon
DOCKER_HOST_IP=$(route -n | awk '/UG[ \t]/{print $2}')

# Search $1 and replace with $2 or $3 (default)
replace() {
    replacement=$2
    if [ -z "$replacement" ]; then
        replacement=$3
    fi
    find $DOCKER_DIR/*/config/*.json -type f -exec sed -i "s/$1/$replacement/g" {} \;
}

replace "%%MYSQL%%" "$MYSQL_PORT" "$DOCKER_HOST_IP:3306"
replace "%%REDIS%%" "$REDIS_PORT" "$DOCKER_HOST_IP:6379"
replace "%%AGGREGATOR_HTTP%%" "$AGGREGATOR_HTTP" "0.0.0.0:6055"
replace "%%GRAPH_HTTP%%" "$GRAPH_HTTP" "0.0.0.0:6071"
replace "%%GRAPH_RPC%%" "$GRAPH_RPC" "0.0.0.0:6070"
replace "%%HBS_HTTP%%" "$HBS_HTTP" "0.0.0.0:6031"
replace "%%HBS_RPC%%" "$HBS_RPC" "0.0.0.0:6030"
replace "%%JUDGE_HTTP%%" "$JUDGE_HTTP" "0.0.0.0:6081"
replace "%%JUDGE_RPC%%" "$JUDGE_RPC" "0.0.0.0:6080"
replace "%%NODATA_HTTP%%" "$NODATA_HTTP" "0.0.0.0:6090"
replace "%%TRANSFER_HTTP%%" "$TRANSFER_HTTP" "0.0.0.0:6060"
replace "%%TRANSFER_RPC%%" "$TRANSFER_RPC" "0.0.0.0:8433"
replace "%%PLUS_API_HTTP%%" "$PLUS_API_HTTP" "0.0.0.0:8080"
replace "%%AGENT_HOSTNAME%%" "$AGENT_HOSTNAME" ""

#use absolute path of metric_list_file in docker
TAB=$'\t';
sed -i "s|.*metric_list_file.*|${TAB}\"metric_list_file\": \"$DOCKER_DIR/api/data/metric\",|g" $DOCKER_DIR/api/config/*.json;

action=$1
module_name=$2

case $action in
    run)
        $DOCKER_DIR/"$module_name"/bin/falcon-"$module_name" -c /open-falcon/"$module_name"/config/cfg.json
        ;;
    *)
        supervisorctl $*
        ;;
esac
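
# --- hedged usage sketch (added; not part of the original script) ---
# replace() rewrites a %%PLACEHOLDER%% in every module's config/*.json and
# falls back to the third argument when the second is empty, e.g.:
#   replace "%%HBS_HTTP%%" "" "0.0.0.0:6031"   # env var unset -> default used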
open-falcon/falcon-plus
docker/ctrl.sh
Shell
apache-2.0
1523
rm -rf cssTestData cssTestApps test.pid
mkdir cssTestData cssTestApps
./arangod -c arangod.conf --pid-file test.pid --log.requests-file req.log &
sleep 5
casperjs test standalone/compareAll.js
res=$?
kill `cat test.pid`
exit $res
triAGENS/ArangoDB-UI-Tests
startTest.sh
Shell
apache-2.0
230
#!/bin/bash

# Copyright 2019 Google LLC

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Presubmit script triggered by Prow.

ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
. ${ROOT}/prow/utils/prow_test_utils.sh

PRESUBMIT_TEST_CASE=asan . ${ROOT}/prow/gcpproxy-presubmit.sh
GoogleCloudPlatform/esp-v2
prow/presubmit-asan.sh
Shell
apache-2.0
788
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

if [[ -n "${TEST_WORKSPACE:-}" ]]; then # Running inside bazel
  echo "Validating job configs..." >&2
elif ! command -v bazel &> /dev/null; then
  echo "Install bazel at https://bazel.build" >&2
  exit 1
else
  (
    set -o xtrace
    bazel test --test_output=streamed //hack:verify-config
  )
  exit 0
fi

trap 'echo ERROR: security jobs changed, run hack/update-config.sh >&2' ERR

echo -n "Running checkconfig with strict warnings..." >&2
"$@" \
  --strict \
  --warnings=mismatched-tide-lenient \
  --warnings=tide-strict-branch \
  --warnings=needs-ok-to-test \
  --warnings=validate-owners \
  --warnings=missing-trigger \
  --warnings=validate-urls \
  --warnings=unknown-fields
echo PASS

echo -n "Checking generated security jobs..." >&2
d=$(diff config/jobs/kubernetes-security/generated-security-jobs.yaml hack/zz.security-jobs.yaml || true)
if [[ -n "$d" ]]; then
  echo "FAIL" >&2
  echo "< unexpected" >&2
  echo "> missing" >&2
  echo "$d" >&2
  false
fi
krzyzacy/test-infra
hack/verify-config.sh
Shell
apache-2.0
1641
#!/bin/bash

set -e

[[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}"
[[ -z "${GCOVR_DIR}" ]] && GCOVR_DIR="${SRCDIR}/bazel-$(basename "${SRCDIR}")"
[[ -z "${TESTLOGS_DIR}" ]] && TESTLOGS_DIR="${SRCDIR}/bazel-testlogs"
[[ -z "${BAZEL_COVERAGE}" ]] && BAZEL_COVERAGE=bazel
[[ -z "${GCOVR}" ]] && GCOVR=gcovr

# This is the target that will be run to generate coverage data. It can be overriden by consumer
# projects that want to run coverage on a different/combined target.
[[ -z "${COVERAGE_TARGET}" ]] && COVERAGE_TARGET="//test/coverage:coverage_tests"

# Make sure ${COVERAGE_TARGET} is up-to-date.
SCRIPT_DIR="$(realpath "$(dirname "$0")")"
(BAZEL_BIN="${BAZEL_COVERAGE}" "${SCRIPT_DIR}"/coverage/gen_build.sh)

echo "Cleaning .gcda/.gcov from previous coverage runs..."
for f in $(find -L "${GCOVR_DIR}" -name "*.gcda" -o -name "*.gcov")
do
  rm -f "${f}"
done
echo "Cleanup completed."

# Run all tests under "bazel test", no sandbox. We're going to generate the
# .gcda inplace in the bazel-out/ directory. This is in contrast to the "bazel
# coverage" method, which is currently broken for C++ (see
# https://github.com/bazelbuild/bazel/issues/1118). This works today as we have
# a single coverage test binary and do not require the "bazel coverage" support
# for collecting multiple traces and glueing them together.
"${BAZEL_COVERAGE}" --batch test "${COVERAGE_TARGET}" ${BAZEL_TEST_OPTIONS} \
  --cache_test_results=no --cxxopt="--coverage" --linkopt="--coverage" \
  --test_output=all --strategy=Genrule=standalone --spawn_strategy=standalone

# The Bazel build has a lot of whack in it, in particular generated files, headers from external
# deps, etc. So, we exclude this from gcov to avoid false reporting of these files in the html and
# stats. The #foo# pattern is because gcov produces files such as
# bazel-out#local-fastbuild#bin#external#spdlog_git#_virtual_includes#spdlog#spdlog#details#pattern_formatter_impl.h.gcov.
# To find these while modifying this regex, perform a gcov run with -k set.
[[ -z "${GCOVR_EXCLUDE_REGEX}" ]] && GCOVR_EXCLUDE_REGEX=".*pb.h.gcov|.*#genfiles#.*|test#.*|external#.*|.*#external#.*|.*#prebuilt#.*"
[[ -z "${GCOVR_EXCLUDE_DIR}" ]] && GCOVR_EXCLUDE_DIR=".*/external/.*"

COVERAGE_DIR="${SRCDIR}"/generated/coverage
mkdir -p "${COVERAGE_DIR}"
COVERAGE_SUMMARY="${COVERAGE_DIR}/coverage_summary.txt"

# gcovr is extremely picky about where it is run and where the paths of the
# original source are relative to its execution location.
cd "${SRCDIR}"
echo "Running gcovr..."
time "${GCOVR}" --gcov-exclude="${GCOVR_EXCLUDE_REGEX}" \
  --exclude-directories="${GCOVR_EXCLUDE_DIR}" --object-directory="${GCOVR_DIR}" -r "${SRCDIR}" \
  --html --html-details --exclude-unreachable-branches --print-summary \
  -o "${COVERAGE_DIR}"/coverage.html > "${COVERAGE_SUMMARY}"

COVERAGE_VALUE=$(grep -Po 'lines: \K(\d|\.)*' "${COVERAGE_SUMMARY}")
COVERAGE_THRESHOLD=98.0
COVERAGE_FAILED=$(echo "${COVERAGE_VALUE}<${COVERAGE_THRESHOLD}" | bc)
if test ${COVERAGE_FAILED} -eq 1; then
    echo Code coverage ${COVERAGE_VALUE} is lower than limit of ${COVERAGE_THRESHOLD}
    exit 1
else
    echo Code coverage ${COVERAGE_VALUE} is good and higher than limit of ${COVERAGE_THRESHOLD}
fi
tschroed/envoy
test/run_envoy_bazel_coverage.sh
Shell
apache-2.0
3213
#!/bin/bash
# announce this instance to etcd
# TODO wait if etcd server not ready
export ACTION=${1-PUT}
export CONTAINER_ID=`hostname`
export IP=`grep $CONTAINER_ID /etc/hosts | cut -f1`
export URL="http://etcd:4001/v2/keys/sling/instances/`hostname`"
echo "Announcing ($ACTION) $IP:$SLING_PORT to $URL"
# TODO need json escapes
curl -s ${URL} -X$ACTION -d value="{\"ip\":\"$IP\",\"port\":\"$SLING_PORT\",\"role\":\"$SLING_ROLE\"}"
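
# --- hedged sketch (added; not part of the original script) ---
# The announced value can be read back through the etcd v2 keys API, assuming
# the same key layout:
#   curl -s "http://etcd:4001/v2/keys/sling/instances/$(hostname)"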
bdelacretaz/sling-adaptto-2016
sling-images/images/base/src/main/docker/slingroot/announce.sh
Shell
apache-2.0
434
#!/usr/bin/env bash
java -jar "$1/Mc2For.jar" -codegen -args "dble&3*1&REAL" "$2"
# gfortran is invoked repeatedly on purpose-unknown grounds; a plausible reading
# is that earlier passes emit the .mod files needed when modules appear out of
# order in a single *.f95 compile, so later passes succeed.
gfortran *.f95 -O3 -o adpt
gfortran *.f95 -O3 -o adpt
gfortran *.f95 -O3 -o adpt
Sable/McTicToc
backends/fortran/Mc2For.sh
Shell
apache-2.0
164
# More verbose handling for 'set -e'
#
# Show a traceback if we're using bash, otherwise just a message
# Downloaded from: https://gist.github.com/kergoth/3885825

on_exit () {
    ret=$?
    case $ret in
        0)
            ;;
        *)
            echo >&2 "Exiting with $ret from a shell command"
            ;;
    esac
}

on_error () {
    local ret=$?
    local FRAMES=${#BASH_SOURCE[@]}

    echo >&2 "Traceback (most recent call last):"
    for ((frame=FRAMES-2; frame >= 0; frame--)); do
        local lineno=${BASH_LINENO[frame]}

        printf >&2 '  File "%s", line %d, in %s\n' "${BASH_SOURCE[frame+1]}" "$lineno" "${FUNCNAME[frame+1]}"
        sed >&2 -n "${lineno}s/^[ ]*/    /p" "${BASH_SOURCE[frame+1]}"
    done
    printf >&2 "Exiting with %d\n" "$ret"
    exit $ret
}

case "$BASH_VERSION" in
    '')
        trap on_exit EXIT
        ;;
    *)
        set -o errtrace
        trap on_error ERR
        ;;
esac
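
# --- hedged usage sketch (added; not part of this snippet) ---
# Source this file near the top of a `set -e` script to get a Python-style
# traceback when a command fails:
#   set -e
#   . ./verbose-set-e-combo-ideal.sh
#   false   # triggers on_error and prints the traceback before exiting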
bchretien/shell-sandbox
src/utils/verbose-set-e-combo-ideal.sh
Shell
bsd-2-clause
937
#!/bin/sh

TESTING_DIR=$(readlink -f $(dirname "$0"))

"$TESTING_DIR/Common/SetUp.sh"

cd "$TESTING_DIR/Evaluation"
./$1.sh $2
cd "$TESTING_DIR"

"$TESTING_DIR/Common/TearDown.sh"
crurik/GrapeFS
Testing/GrapeEvaluation.sh
Shell
bsd-2-clause
179
#!/bin/bash -Eeu

# - - - - - - - - - - - - - - - - - - - - - - - -
on_ci_publish_tagged_images() {
    if ! on_ci; then
        echo
        echo 'not on CI so not publishing tagged images'
        return
    fi
    #if git log -1 --pretty=%B | grep --quiet '[test-only]'; then
    #    echo 'commit message contains [test-only] so stopping here'
    #    return
    #fi
    echo
    echo 'on CI so publishing tagged images'
    local -r image="$(image_name)"
    local -r sha="$(image_sha)"
    local -r tag=${sha:0:7}
    docker push ${image}:latest
    docker push ${image}:${tag}
}

# - - - - - - - - - - - - - - - - - - - - - - - -
on_ci() {
    [ -n "${CIRCLECI:-}" ]
}

#- - - - - - - - - - - - - - - - - - - - - - - -
image_name() {
    echo "${CYBER_DOJO_RUNNER_IMAGE}"
}

#- - - - - - - - - - - - - - - - - - - - - - - -
image_sha() {
    docker run --rm "$(image_name):latest" sh -c 'echo ${SHA}'
}
cyber-dojo/runner
scripts/on_ci_publish_tagged_images.sh
Shell
bsd-2-clause
863
#!/usr/bin/env bash
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##

set -e

PROGNAME="$(basename "${0}")"

function error() { printf "error: %s\n" "$*"; exit 1; }

function usage() {
cat <<EOF
Usage:
${PROGNAME} [options]

[-h|--help]                  Display this help and exit.

--llvm-root <DIR>            Full path to the root of the LLVM monorepo. Only the libcxx
                             and libcxxabi directories are required.

--build-dir <DIR>            Full path to the directory to use for building. This will
                             contain intermediate build products.

--install-dir <DIR>          Full path to the directory to install the library to.

--symbols-dir <DIR>          Full path to the directory to install the .dSYM bundle to.

--sdk <SDK>                  SDK used for building the library. This represents the
                             target platform that the library will run on. You can get
                             a list of SDKs with \`xcodebuild -showsdks\`.

--architectures "<arch>..."  A whitespace separated list of architectures to build for.
                             The library will be built for each architecture independently,
                             and a universal binary containing all architectures will be
                             created from that.

--version X[.Y[.Z]]          The version of the library to encode in the dylib.

--cache <PATH>               The CMake cache to use to control how the library gets built.
EOF
}

while [[ $# -gt 0 ]]; do
    case ${1} in
        -h|--help)
            usage
            exit 0
            ;;
        --llvm-root)
            llvm_root="${2}"
            shift; shift
            ;;
        --build-dir)
            build_dir="${2}"
            shift; shift
            ;;
        --symbols-dir)
            symbols_dir="${2}"
            shift; shift
            ;;
        --install-dir)
            install_dir="${2}"
            shift; shift
            ;;
        --sdk)
            sdk="${2}"
            shift; shift
            ;;
        --architectures)
            architectures="${2}"
            shift; shift
            ;;
        --version)
            version="${2}"
            shift; shift
            ;;
        --cache)
            cache="${2}"
            shift; shift
            ;;
        *)
            error "Unknown argument '${1}'"
            ;;
    esac
done

for arg in llvm_root build_dir symbols_dir install_dir sdk architectures version cache; do
    if [ -z ${!arg+x} ]; then
        error "Missing required argument '--${arg//_/-}'"
    elif [ "${!arg}" == "" ]; then
        error "Argument to --${arg//_/-} must not be empty"
    fi
done

install_name_dir="/usr/lib"
dylib_name="libc++.1.dylib"
make_symlink="yes"

function step() {
    separator="$(printf "%0.s-" $(seq 1 ${#1}))"
    echo
    echo "${separator}"
    echo "${1}"
    echo "${separator}"
}

for arch in ${architectures}; do
    step "Building libc++.dylib for architecture ${arch}"
    mkdir -p "${build_dir}/${arch}"
    (cd "${build_dir}/${arch}" &&
        xcrun --sdk "${sdk}" cmake "${llvm_root}/libcxx" \
            -GNinja \
            -DCMAKE_MAKE_PROGRAM="$(xcrun --sdk "${sdk}" --find ninja)" \
            -C "${cache}" \
            -DCMAKE_INSTALL_PREFIX="${build_dir}/${arch}-install" \
            -DCMAKE_INSTALL_NAME_DIR="${install_name_dir}" \
            -DCMAKE_OSX_ARCHITECTURES="${arch}" \
            -DLIBCXX_INCLUDE_BENCHMARKS=OFF \
            -DLIBCXX_INCLUDE_TESTS=OFF
    )
    xcrun --sdk "${sdk}" cmake --build "${build_dir}/${arch}" --target install-cxx -- -v
done

step "Creating a universal dylib from the dylibs for all architectures"
input_dylibs=$(for arch in ${architectures}; do
    echo "${build_dir}/${arch}-install/lib/${dylib_name}"
done)
xcrun --sdk "${sdk}" lipo -create ${input_dylibs} -output "${build_dir}/${dylib_name}"

step "Installing the (stripped) universal dylib to ${install_dir}/usr/lib"
mkdir -p "${install_dir}/usr/lib"
cp "${build_dir}/${dylib_name}" "${install_dir}/usr/lib/${dylib_name}"
xcrun --sdk "${sdk}" strip -S "${install_dir}/usr/lib/${dylib_name}"
if [[ "${make_symlink}" == "yes" ]]; then
    (cd "${install_dir}/usr/lib" && ln -s "${dylib_name}" libc++.dylib)
fi

step "Installing the unstripped dylib and the dSYM bundle to ${symbols_dir}"
xcrun --sdk "${sdk}" dsymutil "${build_dir}/${dylib_name}" -o "${symbols_dir}/${dylib_name}.dSYM"
cp "${build_dir}/${dylib_name}" "${symbols_dir}/${dylib_name}"

#
# Install the headers by copying the headers from one of the built architectures
# into the install directory. Headers from all architectures should be the same.
#
any_arch=$(echo ${architectures} | cut -d ' ' -f 1)
mkdir -p "${install_dir}/usr/include"
ditto "${build_dir}/${any_arch}-install/include" "${install_dir}/usr/include"
endlessm/chromium-browser
third_party/llvm/libcxx/utils/ci/apple-install-libcxx.sh
Shell
bsd-3-clause
5164
#!/bin/bash
#
# ======= TEMPLATE GAMS-CPLEX Header ========
# No printf parameters
#
# Simple BASH script to run and time a series of GAMS jobs to compare the run
# time of binary vs clustered unit commitment both with and without capacity
# expansion decisions
#
# To actually submit the job use:
#   qsub SCRIPT_NAME

# Version History
# Ver   Date       Time  Who            What
# ---  ----------  ----- -------------- ---------------------------------
#   1  2011-10-08  04:20 bpalmintier    Adapted from pbs_time1.sh v4
#   2  2011-10-08  21:00 bpalmintier    Implemented use of scratch space

#========= Setup Job Queue Parameters ==========
# IMPORTANT: The lines beginning #PBS set various queuing parameters, they are not simple comments
#
# name of submitted job, also name of output file unless specified
# The default job name is the name of this script, so here we suppress the job naming so
# we get unique names for all of our jobs
##PBS -N matlab_pbs
#
# Ask for all 1 node with 8 processors. this may or may not give
# exclusive access to a machine, but typically the queueing system will
# assign the 8 core machines first
#
# By requiring 20GB we ensure we get one of the machines with 24GB (or maybe a 12 core unit)
#PBS -l nodes=1:ppn=8,mem=20gb
#
# This option merges any error messages into output file
#PBS -j oe
#
# Select the queue based on maximum run times. options are:
#    short    2hr
#    medium   8hr
#    long    24hr
#    xlong   48hr, extendable to 168hr using -l walltime= option below
#PBS -q long
# And up the run time to the maximum of a full week (168 hrs)
##PBS -l walltime=168:00:00

echo "Node list:"
cat $PBS_NODEFILE
echo "Disk usage:"
df -h

#Set things up to load modules
source /etc/profile.d/modules.sh
#Load recent version of GAMS
module load gams/23.6.3
#Set path to gams in environment variable so MATLAB can read it
GAMS=`which gams`
export GAMS
#And load CPLEX
module load cplex

#Establish a working directory in scratch
#Will give error if it already exists, but script continues anyway
mkdir /scratch/b_p

#Clean anything out of our scratch folder (Assumes exclusive machine usage)
rm -r /scratch/b_p/*

#Make a new subfolder for this job
SCRATCH="/scratch/b_p/${PBS_JOBID}"
mkdir $SCRATCH

#Establish our model directory
MODEL_DIR="${HOME}/projects/advpower/models/ops/"

#----------------------------
# Setup gams options
#----------------------------
DATE_TIME=`date +%y%m%d-%H%M`
ADVPOWER_REPO_VER=`svnversion ~/projects/advpower`

echo "Date & Time:" ${DATE_TIME}
echo "SVN Repository Version:" ${ADVPOWER_REPO_VER}

GAMS_MODEL="UnitCommit"

#=== END HEADER ===

#======= Repeated GAMS running Template =======
# Template requires 4 (printf style) substitutions:
#    string  output directory
#    string  run_id
#    string  gams_extra_options
#    string  background task

# Version History
# Ver   Date       Time  Who            What
# ---  ----------  ----- -------------- ---------------------------------
#   1  2011-10-08  04:20 bpalmintier    Adapted from pbs_time1.sh v4
#   2  2011-10-08  21:00 bpalmintier    Implemented use of scratch space

OUT_DIR="${HOME}/projects/advpower/models/ops/out/rts96_1week_1x/"
#Make sure output directory exists
mkdir ${OUT_DIR}

RUN_CODE="RTS_x1_wk_clust_ud_uc5_ch010_m01"

#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd

# Default GAMS options to:
#  errmsg:   enable in-line description of errors in list file
#  lf & lo:  store the solver log (normally printed to screen) in $OUT_DIR
#  o:        rename the list file and store in $OUT_DIR
#  inputdir: Look for $include and $batinclude files in $WORK_DIR
# And Advanced Power Model options to:
#  out_dir:     specify directory for CSV output files
#  out_prefix:  add a unique run_id to all output files
#  memo:        encode some helpful run information in the summary file
#
# Plus additional user supplied options pasted into template

GAMS_OPTIONS="-errmsg=1 -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -lo=2 -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst -inputdir=${MODEL_DIR} --out_dir=${OUT_DIR} --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_run_time_compare_v${ADVPOWER_REPO_VER}_${DATE_TIME} --no_nse=1 --par_threads=1 --startup=1 --ramp=1 --out_gen_params=1 --max_solve_time=36000 --sys=ieee_rts96_sys.inc --gens=ieee_rts96_gens.inc --demand=ieee_rts96_dem_wk.inc --demscale=0.92 --min_up_down=1 --uc_ignore_unit_min=5 --rsrv=separate --pwl_cost=1 --rel_cheat=0.001 --mip_gap=0.001 "

#Now run GAMS-CPLEX
echo "Running ${GAMS_MODEL} using GAMS"
echo "  Options: ${GAMS_OPTIONS}"
echo .
gams ${GAMS_MODEL} ${GAMS_OPTIONS} &
echo "GAMS Done (${RUN_CODE})"
echo .

cd ${MODEL_DIR}
pwd

#======= Repeated GAMS running Template =======
# Template requires 4 (printf style) substitutions:
#    string  output directory
#    string  run_id
#    string  gams_extra_options
#    string  background task

# Version History
# Ver   Date       Time  Who            What
# ---  ----------  ----- -------------- ---------------------------------
#   1  2011-10-08  04:20 bpalmintier    Adapted from pbs_time1.sh v4
#   2  2011-10-08  21:00 bpalmintier    Implemented use of scratch space

OUT_DIR="${HOME}/projects/advpower/models/ops/out/rts96_1week_1x/"
#Make sure output directory exists
mkdir ${OUT_DIR}

RUN_CODE="RTS_x1_wk_sep_ud_uc5_ch010_m01"

#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd

# Default GAMS options to:
#  errmsg:   enable in-line description of errors in list file
#  lf & lo:  store the solver log (normally printed to screen) in $OUT_DIR
#  o:        rename the list file and store in $OUT_DIR
#  inputdir: Look for $include and $batinclude files in $WORK_DIR
# And Advanced Power Model options to:
#  out_dir:     specify directory for CSV output files
#  out_prefix:  add a unique run_id to all output files
#  memo:        encode some helpful run information in the summary file
#
# Plus additional user supplied options pasted into template

GAMS_OPTIONS="-errmsg=1 -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -lo=2 -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst -inputdir=${MODEL_DIR} --out_dir=${OUT_DIR} --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_run_time_compare_v${ADVPOWER_REPO_VER}_${DATE_TIME} --no_nse=1 --par_threads=1 --startup=1 --ramp=1 --out_gen_params=1 --max_solve_time=36000 --sys=ieee_rts96_sys.inc --gens=ieee_rts96_gens_sepunit.inc --demand=ieee_rts96_dem_wk.inc --demscale=0.92 --min_up_down=1 --uc_ignore_unit_min=5 --rsrv=separate --pwl_cost=1 --rel_cheat=0.001 --mip_gap=0.001 "

#Now run GAMS-CPLEX
echo "Running ${GAMS_MODEL} using GAMS"
echo "  Options: ${GAMS_OPTIONS}"
echo .
gams ${GAMS_MODEL} ${GAMS_OPTIONS} &
echo "GAMS Done (${RUN_CODE})"
echo .

cd ${MODEL_DIR}
pwd

#======= Repeated GAMS running Template =======
# Template requires 4 (printf style) substitutions:
#    string  output directory
#    string  run_id
#    string  gams_extra_options
#    string  background task

# Version History
# Ver   Date       Time  Who            What
# ---  ----------  ----- -------------- ---------------------------------
#   1  2011-10-08  04:20 bpalmintier    Adapted from pbs_time1.sh v4
#   2  2011-10-08  21:00 bpalmintier    Implemented use of scratch space

OUT_DIR="${HOME}/projects/advpower/models/ops/out/rts96_1week_1x/"
#Make sure output directory exists
mkdir ${OUT_DIR}

RUN_CODE="RTS_x1_wk_clust_ud_uc20_ch010_m01"

#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd

# Default GAMS options to:
#  errmsg:   enable in-line description of errors in list file
#  lf & lo:  store the solver log (normally printed to screen) in $OUT_DIR
#  o:        rename the list file and store in $OUT_DIR
#  inputdir: Look for $include and $batinclude files in $WORK_DIR
# And Advanced Power Model options to:
#  out_dir:     specify directory for CSV output files
#  out_prefix:  add a unique run_id to all output files
#  memo:        encode some helpful run information in the summary file
#
# Plus additional user supplied options pasted into template

GAMS_OPTIONS="-errmsg=1 -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -lo=2 -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst -inputdir=${MODEL_DIR} --out_dir=${OUT_DIR} --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_run_time_compare_v${ADVPOWER_REPO_VER}_${DATE_TIME} --no_nse=1 --par_threads=1 --startup=1 --ramp=1 --out_gen_params=1 --max_solve_time=36000 --sys=ieee_rts96_sys.inc --gens=ieee_rts96_gens.inc --demand=ieee_rts96_dem_wk.inc --demscale=0.92 --min_up_down=1 --uc_ignore_unit_min=20 --rsrv=separate --pwl_cost=1 --rel_cheat=0.001 --mip_gap=0.001 "

#Now run GAMS-CPLEX
echo "Running ${GAMS_MODEL} using GAMS"
echo "  Options: ${GAMS_OPTIONS}"
echo .
gams ${GAMS_MODEL} ${GAMS_OPTIONS} &
echo "GAMS Done (${RUN_CODE})"
echo .

cd ${MODEL_DIR}
pwd

#======= Repeated GAMS running Template =======
# Template requires 4 (printf style) substitutions:
#    string  output directory
#    string  run_id
#    string  gams_extra_options
#    string  background task

# Version History
# Ver   Date       Time  Who            What
# ---  ----------  ----- -------------- ---------------------------------
#   1  2011-10-08  04:20 bpalmintier    Adapted from pbs_time1.sh v4
#   2  2011-10-08  21:00 bpalmintier    Implemented use of scratch space

OUT_DIR="${HOME}/projects/advpower/models/ops/out/rts96_1week_1x/"
#Make sure output directory exists
mkdir ${OUT_DIR}

RUN_CODE="RTS_x1_wk_sep_ud_uc20_ch010_m01"

#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd

# Default GAMS options to:
#  errmsg:   enable in-line description of errors in list file
#  lf & lo:  store the solver log (normally printed to screen) in $OUT_DIR
#  o:        rename the list file and store in $OUT_DIR
#  inputdir: Look for $include and $batinclude files in $WORK_DIR
# And Advanced Power Model options to:
#  out_dir:     specify directory for CSV output files
#  out_prefix:  add a unique run_id to all output files
#  memo:        encode some helpful run information in the summary file
#
# Plus additional user supplied options pasted into template

GAMS_OPTIONS="-errmsg=1 -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -lo=2 -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst -inputdir=${MODEL_DIR} --out_dir=${OUT_DIR} --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_run_time_compare_v${ADVPOWER_REPO_VER}_${DATE_TIME} --no_nse=1 --par_threads=1 --startup=1 --ramp=1 --out_gen_params=1 --max_solve_time=36000 --sys=ieee_rts96_sys.inc --gens=ieee_rts96_gens_sepunit.inc --demand=ieee_rts96_dem_wk.inc --demscale=0.92 --min_up_down=1 --uc_ignore_unit_min=20 --rsrv=separate --pwl_cost=1 --rel_cheat=0.001 --mip_gap=0.001 "

#Now run GAMS-CPLEX
echo "Running ${GAMS_MODEL} using GAMS"
echo "  Options: ${GAMS_OPTIONS}"
echo .
gams ${GAMS_MODEL} ${GAMS_OPTIONS} &
echo "GAMS Done (${RUN_CODE})"
echo .

cd ${MODEL_DIR}
pwd

#======= Repeated GAMS running Template =======
# Template requires 4 (printf style) substitutions:
#    string  output directory
#    string  run_id
#    string  gams_extra_options
#    string  background task

# Version History
# Ver   Date       Time  Who            What
# ---  ----------  ----- -------------- ---------------------------------
#   1  2011-10-08  04:20 bpalmintier    Adapted from pbs_time1.sh v4
#   2  2011-10-08  21:00 bpalmintier    Implemented use of scratch space

OUT_DIR="${HOME}/projects/advpower/models/ops/out/rts96_1week_1x/"
#Make sure output directory exists
mkdir ${OUT_DIR}

RUN_CODE="RTS_x1_wk_clust_ud_m01"

#Make a temporary run directory in scratch
WORK_DIR="${SCRATCH}/tmp_${RUN_CODE}/"
mkdir ${WORK_DIR}
cp ${MODEL_DIR}${GAMS_MODEL}.gms ${WORK_DIR}
cd ${WORK_DIR}
echo "${GAMS_MODEL} copied to temporary ${WORK_DIR}"
pwd

# Default GAMS options to:
#  errmsg:   enable in-line description of errors in list file
#  lf & lo:  store the solver log (normally printed to screen) in $OUT_DIR
#  o:        rename the list file and store in $OUT_DIR
#  inputdir: Look for $include and $batinclude files in $WORK_DIR
# And Advanced Power Model options to:
#  out_dir:     specify directory for CSV output files
#  out_prefix:  add a unique run_id to all output files
#  memo:        encode some helpful run information in the summary file
#
# Plus additional user supplied options pasted into template

GAMS_OPTIONS="-errmsg=1 -lf=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.log -lo=2 -o=${OUT_DIR}${RUN_CODE}_${GAMS_MODEL}.lst -inputdir=${MODEL_DIR} --out_dir=${OUT_DIR} --out_prefix=${RUN_CODE}_ --memo=${RUN_CODE}_run_time_compare_v${ADVPOWER_REPO_VER}_${DATE_TIME} --no_nse=1 --par_threads=1 --startup=1 --ramp=1 --out_gen_params=1 --max_solve_time=36000 --sys=ieee_rts96_sys.inc --gens=ieee_rts96_gens.inc --demand=ieee_rts96_dem_wk.inc --demscale=0.92 --min_up_down=1 --rsrv=separate --pwl_cost=1 --mip_gap=0.001 "

#Now run GAMS-CPLEX
echo "Running ${GAMS_MODEL} using GAMS"
echo "  Options: ${GAMS_OPTIONS}"
echo .
gams ${GAMS_MODEL} ${GAMS_OPTIONS} &
echo "GAMS Done (${RUN_CODE})"
echo .

cd ${MODEL_DIR}
pwd

#=== Footer Template ====
# No printf parameters

# Version History
# Ver   Date       Time  Who            What
# ---  ----------  ----- -------------- ---------------------------------
#   1  2011-10-08  04:20 bpalmintier    Adapted from pbs_time1.sh v4
#   2  2011-10-08  21:00 bpalmintier    Implemented use of scratch space

#Wait until all background jobs are complete
wait

#See how much disk space we used
df -h

#Clean-up scratch space
echo "Cleaning up our Scratch Space"
cd
rm -r /scratch/b_p/*
df -h

echo "Script Complete ${PBS_JOBID}"
bpalmintier/mepo
models/config/rts96_wk_3.sh
Shell
bsd-3-clause
14336
#!/usr/bin/env bash

PYTHONPATH=.:$PYTHONPATH python2 -m nose -v pyltr
jma127/pyltr
runtests_py2.sh
Shell
bsd-3-clause
71
#!/bin/bash

# Batch qsub submission script for testing gradient update values for one pair of 3 layer and one pair of 5 layer models

offset=0
outputLayer=$1

first="SdA_900_300_$outputLayer.pkl"
second="SdA_500_300_$outputLayer.pkl"
qsub submit_test_gradient_gravity_3layers.sh -v FIRSTMODEL="$first",SECONDMODEL="$second",OFFSET="$offset"
((offset+=5))
sleep 5

first="SdA_700_700_400_200_$outputLayer.pkl"
second="SdA_800_900_500_300_$outputLayer.pkl"
qsub submit_test_gradient_gravity_5layers.sh -v FIRSTMODEL="$first",SECONDMODEL="$second",OFFSET="$offset"
sleep 5
lzamparo/SdA_reduce
utils/tests/test_gradient_updates.sh
Shell
bsd-3-clause
576
#!/bin/bash
set -e

repo="$1"
distro="$2"
mirror="$3"

if [ ! "$repo" ] || [ ! "$distro" ]; then
	self="$(basename $0)"
	echo >&2 "usage: $self repo distro [mirror]"
	echo >&2
	echo >&2 "   ie: $self username/centos centos-5"
	echo >&2 "       $self username/centos centos-6"
	echo >&2
	echo >&2 "   ie: $self username/slc slc-5"
	echo >&2 "       $self username/slc slc-6"
	echo >&2
	echo >&2 "   ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/"
	echo >&2 "       $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/"
	echo >&2
	echo >&2 'See /etc/rinse for supported values of "distro" and for examples of'
	echo >&2 '  expected values of "mirror".'
	echo >&2
	echo >&2 'This script is tested to work with the original upstream version of rinse,'
	echo >&2 '  found at http://www.steve.org.uk/Software/rinse/ and also in Debian at'
	echo >&2 '  http://packages.debian.org/wheezy/rinse -- as always, YMMV.'
	echo >&2
	exit 1
fi

target="/tmp/docker-rootfs-rinse-$distro-$$-$RANDOM"

cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
returnTo="$(pwd -P)"

rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
if [ "$mirror" ]; then
	rinseArgs+=( --mirror "$mirror" )
fi

set -x

mkdir -p "$target"

sudo rinse "${rinseArgs[@]}"

cd "$target"

# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
sudo rm -rf dev
sudo mkdir -m 755 dev
(
	cd dev
	sudo ln -sf /proc/self/fd ./
	sudo mkdir -m 755 pts
	sudo mkdir -m 1777 shm
	sudo mknod -m 600 console c 5 1
	sudo mknod -m 600 initctl p
	sudo mknod -m 666 full c 1 7
	sudo mknod -m 666 null c 1 3
	sudo mknod -m 666 ptmx c 5 2
	sudo mknod -m 666 random c 1 8
	sudo mknod -m 666 tty c 5 0
	sudo mknod -m 666 tty0 c 4 0
	sudo mknod -m 666 urandom c 1 9
	sudo mknod -m 666 zero c 1 5
)

# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"

# locales
sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
# docs
sudo rm -rf usr/share/{man,doc,info,gnome/help}
# cracklib
sudo rm -rf usr/share/cracklib
# i18n
sudo rm -rf usr/share/i18n
# yum cache
sudo rm -rf var/cache/yum
sudo mkdir -p --mode=0755 var/cache/yum
# sln
sudo rm -rf sbin/sln
# ldconfig
#sudo rm -rf sbin/ldconfig
sudo rm -rf etc/ld.so.cache var/cache/ldconfig
sudo mkdir -p --mode=0755 var/cache/ldconfig

# allow networking init scripts inside the container to work without extra steps
echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null

## create a nice login bash shell
cat >| ../.bashrc <<EOF
# Personal aliases and functions.

# Personal environment variables and startup programs should go in
# ~/.bash_profile.  System wide environment variables and startup
# programs are in /etc/profile.  System wide aliases and functions are
# in /etc/bashrc.

if [ -f "/etc/bashrc" ] ; then
  source /etc/bashrc
fi
EOF
sudo mv ../.bashrc root/.

cat >| ../.bash_profile <<EOF
# Personal environment variables and startup programs.

# Personal aliases and functions should go in ~/.bashrc.  System wide
# environment variables and startup programs are in /etc/profile.
# System wide aliases and functions are in /etc/bashrc.

if [ -f "$HOME/.bashrc" ] ; then
  source $HOME/.bashrc
fi

export PYTHONSTARTUP=/root/.pythonrc.py
EOF
sudo mv ../.bash_profile root/.

## for a nice python prompt
cat >| ../.pythonrc.py <<EOF
##
## for tab-completion
##
import rlcompleter, readline
readline.parse_and_bind('tab: complete')
readline.parse_and_bind( 'set show-all-if-ambiguous On' )

##
## for history
##
import os, atexit
histfile = os.path.join(os.environ["HOME"], ".python_history")
try:
    readline.read_history_file(histfile)
except IOError:
    pass
atexit.register(readline.write_history_file, histfile)
del os, atexit, histfile
del readline
EOF
sudo mv ../.pythonrc.py root/.

# to restore locales later:
#  yum reinstall glibc-common

version=
if [ -r etc/redhat-release ]; then
	version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
elif [ -r etc/SuSE-release ]; then
	version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
fi

if [ -z "$version" ]; then
	echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
	sleep 20
	version="$distro"
fi

sudo tar --numeric-owner -c . | docker import - $repo:$version

docker run -i -t $repo:$version echo success

cd "$returnTo"
sudo rm -rf "$target"
atlas-org/docker-containers
slc/mkimage-rinse.sh
Shell
bsd-3-clause
4728
#!/bin/bash
# testOne.sh functional test script
# Call with test platform, test-program test-set [,options [,arguments]]
# test-set is a char usually 1-9 for the various .out files, usually use 1 for .out1
# e.g. "./testOne.sh pentium hello"
# or   "./testOne.sh sparc fibo 1 '' 10"
# or   "./testOne.sh sparc switch_cc 6 '-Td -nG' '2 3 4 5 6'"
# Note: options and arguments are quoted strings
# $1 = platform  $2 = test  $3 = test-set  $4 = options  $5 = parameters to the recompiled executable
echo $* > functest.res
rm -rf functest/$1/$2
SPACES="                                        "
RES="Result for $1"
WHITE=${SPACES:0:(18 - ${#RES})}
RES="$RES$WHITE $2:"
WHITE=${SPACES:0:(34 - ${#RES})}
RES=$RES$WHITE
echo -n -e "$RES"
sh -c "./boomerang -o functest/$1 $4 test/$1/$2 2>/dev/null >/dev/null"
ret=$?
if [[ ret -ge 128 ]]; then
    SIGNAL="signal $((ret-128))"
    if [ "$SIGNAL" = "signal 9" ];  then SIGNAL="a kill signal"; fi
    if [ "$SIGNAL" = "signal 11" ]; then SIGNAL="a segmentation fault"; fi
    if [ "$SIGNAL" = "signal 15" ]; then SIGNAL="a termination signal"; fi
    RESULT="Boomerang FAILED set $3 with $SIGNAL"
else
    if [[ ! -f functest/$1/$2/$2.c ]]; then
        RESULT="NO BOOMERANG OUTPUT set $3!"
    else
        cat `ls -rt functest/$1/$2/*.c` > functest.c
        # if test/$1/$2.sed exists, use it to make "known error" corrections to the source code
        if [[ -f test/$1/$2.sed ]]; then
            echo Warning... $1/$2.sed used >> functest.res
            sed -i -f test/$1/$2.sed functest.c
            ret=$?
            if [[ ret -ne 0 ]]; then
                echo test/$1/$2.sed FAILED! >> functest.res
                exit 10
            fi
        fi
        gcc -D__size32=int -D__size16=short -D__size8=char -o functest.exe functest.c >> functest.res 2>&1
        if [[ $? != 0 ]]; then
            RESULT="Compile FAILED"
        else
            sh -c "./functest.exe $5 > functest.out 2>&1"
            ret=$?
            if [[ ret -ge 128 ]]; then
                SIGNAL="signal $((ret-128))"
                if [ "$SIGNAL" = "signal 9" ];  then SIGNAL="a kill signal"; fi
                if [ "$SIGNAL" = "signal 11" ]; then SIGNAL="a segmentation fault"; fi
                if [ "$SIGNAL" = "signal 15" ]; then SIGNAL="a termination signal"; fi
                RESULT="EXECUTION TERMINATED with $SIGNAL"
            else
                if [[ ret -ne 0 ]]; then
                    echo Warning! return code from execute was $((ret)) >> functest.res
                fi
                diff -u test/source/$2.out$3 functest.out >> functest.res
                ret=$?
                if [[ ret -ne 0 ]]; then
                    RESULT="FAILED diff set $3"
                else
                    RESULT="Passed set $3"
                fi
            fi
        fi
    fi
fi
grep goto functest.c > /dev/null
if [[ $? -eq 0 ]]; then
    RESULT=$RESULT" (gotos in output)"
fi
echo $RESULT
echo -e "$RES""$RESULT" >> functest.res
echo >> functest.res
cat functest.res >> functests.out
#grep "^Result" functest.res
rm -f functest.{res,c,exe,out}
turboencabulator/boomerang
testOne.sh
Shell
bsd-3-clause
2715
#!/bin/bash
scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. $scriptDir/bash/messages.bash

echo -e "Loading dependencies..."
curl -O http://getcomposer.org/composer.phar
php composer.phar install --dev
LuciaPerez/PUWI
load_deps.bash
Shell
bsd-3-clause
219
set -e

AUTH_FILE=`psql -U postgres -c "SHOW hba_file" -At`

# md5 authentication
sudo sed -i '1s/^/host all crystal_md5 127.0.0.1\/32 md5\n/' ${AUTH_FILE}
sudo sed -i '2s/^/host all crystal_md5 ::1\/128 md5\n/' ${AUTH_FILE}

sudo service postgresql restart $TRAVIS_POSTGRESQL_VERSION
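
# --- hedged verification sketch (added; not part of the original script) ---
# md5 auth for the crystal_md5 role can be exercised with a password-bearing
# connection URI (role password "secret" is an assumption for illustration):
#   psql "postgresql://crystal_md5:secret@127.0.0.1/postgres" -c 'SELECT 1'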
ucmsky/kemal-sample
lib/pg/.travis/setup.sh
Shell
mit
286
#! /bin/bash

git clone --quiet https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG} travis-build > /dev/null;
cd travis-build

if [ "${TRAVIS_BRANCH}" = "master" ]; then
	# Forward Integrate
	git checkout --track -b integration origin/integration > /dev/null;
	git merge --squash origin/master --no-commit > /dev/null;
	git commit -m "Travis CI auto-merge of master from build ${TRAVIS_BUILD_NUMBER} - [ci skip]";
	npm version patch -m "Travis CI auto-version from build ${TRAVIS_BUILD_NUMBER}";
	git push -q origin integration > /dev/null;

	# update local
	git fetch;

	# Reverse Integrate
	git checkout master > /dev/null;
	git merge --squash origin/integration --no-commit > /dev/null;
	git commit -m "Travis CI auto-merge of integration from build ${TRAVIS_BUILD_NUMBER} - [ci skip]";
	git push -q origin master > /dev/null;
fi

if [ "${TRAVIS_BRANCH}" = "stable" ]; then
	git checkout --track -b stable origin/stable > /dev/null;
	version=`git diff HEAD^..HEAD -- "$(git rev-parse --show-toplevel)"/package.json | grep '^\+.*version' | sed -s 's/[^0-9\.]//g'`
	if [ "$version" != "" ]; then
		git tag -a "v$version" -m "`git log -1 --format=%s`"
		echo "Created a new tag, v$version"
	fi
	git push origin --tags > /dev/null;

	# Reverse Integrate
	git checkout --track -b integration origin/integration > /dev/null;
	git merge --squash origin/stable --no-commit > /dev/null;
	git commit -m "Travis CI auto-merge of stable from build ${TRAVIS_BUILD_NUMBER} - [ci skip]";
	git push -q origin integration > /dev/null;

	# Reverse Integrate
	git checkout master > /dev/null;
	git merge --squash origin/stable --no-commit > /dev/null;
	git commit -m "Travis CI auto-merge of stable from build ${TRAVIS_BUILD_NUMBER} - [ci skip]";
	git push -q origin master > /dev/null;
fi

cd ..;
DeepElement/winjsrocks-extras
build/repo-cli.sh
Shell
mit
1,823
#!/bin/bash if which nproc > /dev/null; then MAKEOPTS="-j$(nproc)" else MAKEOPTS="-j$(sysctl -n hw.ncpu)" fi ######################################################################################## # general helper functions function ci_gcc_arm_setup { sudo apt-get install gcc-arm-none-eabi libnewlib-arm-none-eabi arm-none-eabi-gcc --version } ######################################################################################## # code formatting function ci_code_formatting_setup { sudo apt-add-repository --yes --update ppa:pybricks/ppa sudo apt-get install uncrustify pip3 install black uncrustify --version black --version } function ci_code_formatting_run { tools/codeformat.py -v } ######################################################################################## # commit formatting function ci_commit_formatting_run { git remote add upstream https://github.com/micropython/micropython.git git fetch --depth=100 upstream master # For a PR, upstream/master..HEAD ends with a merge commit into master, exlude that one. tools/verifygitlog.py -v upstream/master..HEAD --no-merges } ######################################################################################## # code size function ci_code_size_setup { sudo apt-get update sudo apt-get install gcc-multilib gcc --version ci_gcc_arm_setup } function ci_code_size_build { # starts off at either the ref/pull/N/merge FETCH_HEAD, or the current branch HEAD git checkout -b pull_request # save the current location git remote add upstream https://github.com/micropython/micropython.git git fetch --depth=100 upstream master # build reference, save to size0 # ignore any errors with this build, in case master is failing git checkout `git merge-base --fork-point upstream/master pull_request` git show -s tools/metrics.py clean bm tools/metrics.py build bm | tee ~/size0 || true # build PR/branch, save to size1 git checkout pull_request git log upstream/master..HEAD tools/metrics.py clean bm tools/metrics.py build bm | tee ~/size1 } ######################################################################################## # ports/cc3200 function ci_cc3200_setup { ci_gcc_arm_setup } function ci_cc3200_build { make ${MAKEOPTS} -C ports/cc3200 BTARGET=application BTYPE=release make ${MAKEOPTS} -C ports/cc3200 BTARGET=bootloader BTYPE=release } ######################################################################################## # ports/esp32 function ci_esp32_setup_helper { git clone https://github.com/espressif/esp-idf.git git -C esp-idf checkout $1 git -C esp-idf submodule update --init \ components/bt/host/nimble/nimble \ components/esp_wifi \ components/esptool_py/esptool \ components/lwip/lwip \ components/mbedtls/mbedtls if [ -d esp-idf/components/bt/controller/esp32 ]; then git -C esp-idf submodule update --init \ components/bt/controller/lib_esp32 \ components/bt/controller/lib_esp32c3_family else git -C esp-idf submodule update --init \ components/bt/controller/lib fi ./esp-idf/install.sh } function ci_esp32_idf402_setup { ci_esp32_setup_helper v4.0.2 } function ci_esp32_idf44_setup { # This commit is just before v5.0-dev ci_esp32_setup_helper 142bb32c50fa9875b8b69fa539a2d59559460d72 } function ci_esp32_build { source esp-idf/export.sh make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/esp32 submodules make ${MAKEOPTS} -C ports/esp32 USER_C_MODULES=../../../examples/usercmodule/micropython.cmake FROZEN_MANIFEST=$(pwd)/ports/esp32/boards/manifest.py if [ -d $IDF_PATH/components/esp32c3 ]; then make ${MAKEOPTS} -C ports/esp32 
BOARD=GENERIC_C3 fi if [ -d $IDF_PATH/components/esp32s2 ]; then make ${MAKEOPTS} -C ports/esp32 BOARD=GENERIC_S2 fi if [ -d $IDF_PATH/components/esp32s3 ]; then make ${MAKEOPTS} -C ports/esp32 BOARD=GENERIC_S3 fi } ######################################################################################## # ports/esp8266 function ci_esp8266_setup { sudo pip install pyserial esptool wget https://github.com/jepler/esp-open-sdk/releases/download/2018-06-10/xtensa-lx106-elf-standalone.tar.gz zcat xtensa-lx106-elf-standalone.tar.gz | tar x # Remove this esptool.py so pip version is used instead rm xtensa-lx106-elf/bin/esptool.py } function ci_esp8266_path { echo $(pwd)/xtensa-lx106-elf/bin } function ci_esp8266_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/esp8266 submodules make ${MAKEOPTS} -C ports/esp8266 make ${MAKEOPTS} -C ports/esp8266 BOARD=GENERIC_512K make ${MAKEOPTS} -C ports/esp8266 BOARD=GENERIC_1M } ######################################################################################## # ports/javascript function ci_javascript_setup { git clone https://github.com/emscripten-core/emsdk.git (cd emsdk && ./emsdk install latest && ./emsdk activate latest) } function ci_javascript_build { source emsdk/emsdk_env.sh make ${MAKEOPTS} -C ports/javascript } function ci_javascript_run_tests { # This port is very slow at running, so only run a few of the tests. (cd tests && MICROPY_MICROPYTHON=../ports/javascript/node_run.sh ./run-tests.py -j1 basics/builtin_*.py) } ######################################################################################## # ports/mimxrt function ci_mimxrt_setup { ci_gcc_arm_setup } function ci_mimxrt_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/mimxrt submodules make ${MAKEOPTS} -C ports/mimxrt BOARD=MIMXRT1020_EVK make ${MAKEOPTS} -C ports/mimxrt BOARD=TEENSY40 } ######################################################################################## # ports/nrf function ci_nrf_setup { ci_gcc_arm_setup } function ci_nrf_build { ports/nrf/drivers/bluetooth/download_ble_stack.sh s140_nrf52_6_1_1 make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/nrf submodules make ${MAKEOPTS} -C ports/nrf BOARD=pca10040 make ${MAKEOPTS} -C ports/nrf BOARD=microbit make ${MAKEOPTS} -C ports/nrf BOARD=pca10056 SD=s140 make ${MAKEOPTS} -C ports/nrf BOARD=pca10090 } ######################################################################################## # ports/powerpc function ci_powerpc_setup { sudo apt-get update sudo apt-get install gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross } function ci_powerpc_build { make ${MAKEOPTS} -C ports/powerpc UART=potato make ${MAKEOPTS} -C ports/powerpc UART=lpc_serial } ######################################################################################## # ports/qemu-arm function ci_qemu_arm_setup { ci_gcc_arm_setup sudo apt-get update sudo apt-get install qemu-system qemu-system-arm --version } function ci_qemu_arm_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/qemu-arm CFLAGS_EXTRA=-DMP_ENDIANNESS_BIG=1 make ${MAKEOPTS} -C ports/qemu-arm clean make ${MAKEOPTS} -C ports/qemu-arm -f Makefile.test test make ${MAKEOPTS} -C ports/qemu-arm -f Makefile.test clean make ${MAKEOPTS} -C ports/qemu-arm -f Makefile.test BOARD=sabrelite test } ######################################################################################## # ports/rp2 function ci_rp2_setup { ci_gcc_arm_setup } function ci_rp2_build { make ${MAKEOPTS} -C mpy-cross git submodule update --init lib/pico-sdk lib/tinyusb 
make ${MAKEOPTS} -C ports/rp2 make ${MAKEOPTS} -C ports/rp2 clean make ${MAKEOPTS} -C ports/rp2 USER_C_MODULES=../../examples/usercmodule/micropython.cmake } ######################################################################################## # ports/samd function ci_samd_setup { ci_gcc_arm_setup } function ci_samd_build { make ${MAKEOPTS} -C ports/samd submodules make ${MAKEOPTS} -C ports/samd } ######################################################################################## # ports/stm32 function ci_stm32_setup { ci_gcc_arm_setup pip3 install pyhy } function ci_stm32_pyb_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/stm32 submodules git submodule update --init lib/btstack git submodule update --init lib/mynewt-nimble make ${MAKEOPTS} -C ports/stm32 BOARD=PYBV11 MICROPY_PY_WIZNET5K=5200 MICROPY_PY_CC3K=1 USER_C_MODULES=../../examples/usercmodule make ${MAKEOPTS} -C ports/stm32 BOARD=PYBD_SF2 make ${MAKEOPTS} -C ports/stm32 BOARD=PYBD_SF6 NANBOX=1 MICROPY_BLUETOOTH_NIMBLE=0 MICROPY_BLUETOOTH_BTSTACK=1 make ${MAKEOPTS} -C ports/stm32/mboot BOARD=PYBV10 CFLAGS_EXTRA='-DMBOOT_FSLOAD=1 -DMBOOT_VFS_LFS2=1' make ${MAKEOPTS} -C ports/stm32/mboot BOARD=PYBD_SF6 } function ci_stm32_nucleo_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/stm32 submodules git submodule update --init lib/mynewt-nimble make ${MAKEOPTS} -C ports/stm32 BOARD=NUCLEO_F091RC make ${MAKEOPTS} -C ports/stm32 BOARD=NUCLEO_H743ZI CFLAGS_EXTRA='-DMICROPY_PY_THREAD=1' make ${MAKEOPTS} -C ports/stm32 BOARD=NUCLEO_L073RZ make ${MAKEOPTS} -C ports/stm32 BOARD=NUCLEO_L476RG DEBUG=1 make ${MAKEOPTS} -C ports/stm32 BOARD=NUCLEO_WB55 make ${MAKEOPTS} -C ports/stm32/mboot BOARD=NUCLEO_WB55 # Test mboot_pack_dfu.py created a valid file, and that its unpack-dfu command works. 
BOARD_WB55=ports/stm32/boards/NUCLEO_WB55 BUILD_WB55=ports/stm32/build-NUCLEO_WB55 python3 ports/stm32/mboot/mboot_pack_dfu.py -k $BOARD_WB55/mboot_keys.h unpack-dfu $BUILD_WB55/firmware.pack.dfu $BUILD_WB55/firmware.unpack.dfu diff $BUILD_WB55/firmware.unpack.dfu $BUILD_WB55/firmware.dfu # Test unpack-dfu command works without a secret key tail -n +2 $BOARD_WB55/mboot_keys.h > $BOARD_WB55/mboot_keys_no_sk.h python3 ports/stm32/mboot/mboot_pack_dfu.py -k $BOARD_WB55/mboot_keys_no_sk.h unpack-dfu $BUILD_WB55/firmware.pack.dfu $BUILD_WB55/firmware.unpack_no_sk.dfu diff $BUILD_WB55/firmware.unpack.dfu $BUILD_WB55/firmware.unpack_no_sk.dfu } ######################################################################################## # ports/teensy function ci_teensy_setup { ci_gcc_arm_setup } function ci_teensy_build { make ${MAKEOPTS} -C ports/teensy } ######################################################################################## # ports/unix CI_UNIX_OPTS_SYS_SETTRACE=( MICROPY_PY_BTREE=0 MICROPY_PY_FFI=0 MICROPY_PY_USSL=0 CFLAGS_EXTRA="-DMICROPY_PY_SYS_SETTRACE=1" ) CI_UNIX_OPTS_SYS_SETTRACE_STACKLESS=( MICROPY_PY_BTREE=0 MICROPY_PY_FFI=0 MICROPY_PY_USSL=0 CFLAGS_EXTRA="-DMICROPY_STACKLESS=1 -DMICROPY_STACKLESS_STRICT=1 -DMICROPY_PY_SYS_SETTRACE=1" ) CI_UNIX_OPTS_QEMU_MIPS=( CROSS_COMPILE=mips-linux-gnu- VARIANT=coverage MICROPY_STANDALONE=1 LDFLAGS_EXTRA="-static" ) CI_UNIX_OPTS_QEMU_ARM=( CROSS_COMPILE=arm-linux-gnueabi- VARIANT=coverage MICROPY_STANDALONE=1 ) function ci_unix_build_helper { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/unix "$@" submodules make ${MAKEOPTS} -C ports/unix "$@" deplibs make ${MAKEOPTS} -C ports/unix "$@" } function ci_unix_build_ffi_lib_helper { $1 $2 -shared -o tests/unix/ffi_lib.so tests/unix/ffi_lib.c } function ci_unix_run_tests_helper { make -C ports/unix "$@" test } function ci_unix_run_tests_full_helper { variant=$1 shift if [ $variant = standard ]; then micropython=micropython else micropython=micropython-$variant fi make -C ports/unix VARIANT=$variant "$@" test_full (cd tests && MICROPY_CPYTHON3=python3 MICROPY_MICROPYTHON=../ports/unix/$micropython ./run-multitests.py multi_net/*.py) } function ci_native_mpy_modules_build { if [ "$1" = "" ]; then arch=x64 else arch=$1 fi make -C examples/natmod/features1 ARCH=$arch make -C examples/natmod/features2 ARCH=$arch make -C examples/natmod/btree ARCH=$arch make -C examples/natmod/framebuf ARCH=$arch make -C examples/natmod/uheapq ARCH=$arch make -C examples/natmod/urandom ARCH=$arch make -C examples/natmod/ure ARCH=$arch make -C examples/natmod/uzlib ARCH=$arch } function ci_native_mpy_modules_32bit_build { ci_native_mpy_modules_build x86 } function ci_unix_minimal_build { make ${MAKEOPTS} -C ports/unix VARIANT=minimal } function ci_unix_minimal_run_tests { (cd tests && MICROPY_CPYTHON3=python3 MICROPY_MICROPYTHON=../ports/unix/micropython-minimal ./run-tests.py -e exception_chain -e self_type_check -e subclass_native_init -d basics) } function ci_unix_standard_build { ci_unix_build_helper VARIANT=standard ci_unix_build_ffi_lib_helper gcc } function ci_unix_standard_run_tests { ci_unix_run_tests_full_helper standard } function ci_unix_standard_run_perfbench { (cd tests && MICROPY_CPYTHON3=python3 MICROPY_MICROPYTHON=../ports/unix/micropython ./run-perfbench.py 1000 1000) } function ci_unix_dev_build { ci_unix_build_helper VARIANT=dev } function ci_unix_dev_run_tests { ci_unix_run_tests_helper VARIANT=dev } function ci_unix_coverage_setup { sudo pip3 install setuptools sudo pip3 install 
pyelftools gcc --version python3 --version } function ci_unix_coverage_build { ci_unix_build_helper VARIANT=coverage ci_unix_build_ffi_lib_helper gcc } function ci_unix_coverage_run_tests { ci_unix_run_tests_full_helper coverage } function ci_unix_coverage_run_native_mpy_tests { MICROPYPATH=examples/natmod/features2 ./ports/unix/micropython-coverage -m features2 (cd tests && ./run-natmodtests.py "$@" extmod/{btree*,framebuf*,uheapq*,ure*,uzlib*}.py) } function ci_unix_32bit_setup { sudo dpkg --add-architecture i386 sudo apt-get update sudo apt-get install gcc-multilib g++-multilib libffi-dev:i386 sudo pip3 install setuptools sudo pip3 install pyelftools gcc --version python2 --version python3 --version } function ci_unix_coverage_32bit_build { ci_unix_build_helper VARIANT=coverage MICROPY_FORCE_32BIT=1 ci_unix_build_ffi_lib_helper gcc -m32 } function ci_unix_coverage_32bit_run_tests { ci_unix_run_tests_full_helper coverage MICROPY_FORCE_32BIT=1 } function ci_unix_coverage_32bit_run_native_mpy_tests { ci_unix_coverage_run_native_mpy_tests --arch x86 } function ci_unix_nanbox_build { # Use Python 2 to check that it can run the build scripts ci_unix_build_helper PYTHON=python2 VARIANT=nanbox ci_unix_build_ffi_lib_helper gcc -m32 } function ci_unix_nanbox_run_tests { ci_unix_run_tests_full_helper nanbox PYTHON=python2 } function ci_unix_float_build { ci_unix_build_helper VARIANT=standard CFLAGS_EXTRA="-DMICROPY_FLOAT_IMPL=MICROPY_FLOAT_IMPL_FLOAT" ci_unix_build_ffi_lib_helper gcc } function ci_unix_float_run_tests { # TODO get this working: ci_unix_run_tests_full_helper standard CFLAGS_EXTRA="-DMICROPY_FLOAT_IMPL=MICROPY_FLOAT_IMPL_FLOAT" ci_unix_run_tests_helper CFLAGS_EXTRA="-DMICROPY_FLOAT_IMPL=MICROPY_FLOAT_IMPL_FLOAT" } function ci_unix_clang_setup { sudo apt-get install clang clang --version } function ci_unix_stackless_clang_build { make ${MAKEOPTS} -C mpy-cross CC=clang make ${MAKEOPTS} -C ports/unix submodules make ${MAKEOPTS} -C ports/unix CC=clang CFLAGS_EXTRA="-DMICROPY_STACKLESS=1 -DMICROPY_STACKLESS_STRICT=1" } function ci_unix_stackless_clang_run_tests { ci_unix_run_tests_helper CC=clang } function ci_unix_float_clang_build { make ${MAKEOPTS} -C mpy-cross CC=clang make ${MAKEOPTS} -C ports/unix submodules make ${MAKEOPTS} -C ports/unix CC=clang CFLAGS_EXTRA="-DMICROPY_FLOAT_IMPL=MICROPY_FLOAT_IMPL_FLOAT" } function ci_unix_float_clang_run_tests { ci_unix_run_tests_helper CC=clang } function ci_unix_settrace_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/unix "${CI_UNIX_OPTS_SYS_SETTRACE[@]}" } function ci_unix_settrace_run_tests { ci_unix_run_tests_helper "${CI_UNIX_OPTS_SYS_SETTRACE[@]}" } function ci_unix_settrace_stackless_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/unix "${CI_UNIX_OPTS_SYS_SETTRACE_STACKLESS[@]}" } function ci_unix_settrace_stackless_run_tests { ci_unix_run_tests_helper "${CI_UNIX_OPTS_SYS_SETTRACE_STACKLESS[@]}" } function ci_unix_macos_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/unix submodules #make ${MAKEOPTS} -C ports/unix deplibs make ${MAKEOPTS} -C ports/unix # check for additional compiler errors/warnings make ${MAKEOPTS} -C ports/unix VARIANT=dev submodules make ${MAKEOPTS} -C ports/unix VARIANT=dev make ${MAKEOPTS} -C ports/unix VARIANT=coverage submodules make ${MAKEOPTS} -C ports/unix VARIANT=coverage } function ci_unix_macos_run_tests { # Issues with macOS tests: # - OSX has poor time resolution and these uasyncio tests do not have correct output # - import_pkg7 has a problem with relative 
imports # - urandom_basic has a problem with getrandbits(0) (cd tests && ./run-tests.py --exclude 'uasyncio_(basic|heaplock|lock|wait_task)' --exclude 'import_pkg7.py' --exclude 'urandom_basic.py') } function ci_unix_qemu_mips_setup { sudo apt-get update sudo apt-get install gcc-mips-linux-gnu g++-mips-linux-gnu sudo apt-get install qemu-user qemu-mips --version } function ci_unix_qemu_mips_build { # qemu-mips on GitHub Actions will seg-fault if not linked statically ci_unix_build_helper "${CI_UNIX_OPTS_QEMU_MIPS[@]}" } function ci_unix_qemu_mips_run_tests { # Issues with MIPS tests: # - (i)listdir does not work, it always returns the empty list (it's an issue with the underlying C call) # - ffi tests do not work file ./ports/unix/micropython-coverage (cd tests && MICROPY_MICROPYTHON=../ports/unix/micropython-coverage ./run-tests.py --exclude 'vfs_posix.py' --exclude 'ffi_(callback|float|float2).py') } function ci_unix_qemu_arm_setup { sudo apt-get update sudo apt-get install gcc-arm-linux-gnueabi g++-arm-linux-gnueabi sudo apt-get install qemu-user qemu-arm --version } function ci_unix_qemu_arm_build { ci_unix_build_helper "${CI_UNIX_OPTS_QEMU_ARM[@]}" ci_unix_build_ffi_lib_helper arm-linux-gnueabi-gcc } function ci_unix_qemu_arm_run_tests { # Issues with ARM tests: # - (i)listdir does not work, it always returns the empty list (it's an issue with the underlying C call) export QEMU_LD_PREFIX=/usr/arm-linux-gnueabi file ./ports/unix/micropython-coverage (cd tests && MICROPY_MICROPYTHON=../ports/unix/micropython-coverage ./run-tests.py --exclude 'vfs_posix.py') } ######################################################################################## # ports/windows function ci_windows_setup { sudo apt-get install gcc-mingw-w64 } function ci_windows_build { make ${MAKEOPTS} -C mpy-cross make ${MAKEOPTS} -C ports/windows CROSS_COMPILE=i686-w64-mingw32- } ######################################################################################## # ports/zephyr function ci_zephyr_setup { docker pull zephyrprojectrtos/ci:v0.17.3 docker run --name zephyr-ci -d -it \ -v "$(pwd)":/micropython \ -e ZEPHYR_SDK_INSTALL_DIR=/opt/toolchains/zephyr-sdk-0.12.4 \ -e ZEPHYR_TOOLCHAIN_VARIANT=zephyr \ -e ZEPHYR_BASE=/zephyrproject/zephyr \ -w /micropython/ports/zephyr \ zephyrprojectrtos/ci:v0.17.3 docker ps -a } function ci_zephyr_install { docker exec zephyr-ci west init --mr v2.6.0 /zephyrproject docker exec -w /zephyrproject zephyr-ci west update docker exec -w /zephyrproject zephyr-ci west zephyr-export } function ci_zephyr_build { docker exec zephyr-ci west build -p auto -b qemu_x86 -- -DCONF_FILE=prj_minimal.conf docker exec zephyr-ci west build -p auto -b frdm_k64f -- -DCONF_FILE=prj_minimal.conf docker exec zephyr-ci west build -p auto -b qemu_x86 docker exec zephyr-ci west build -p auto -b frdm_k64f docker exec zephyr-ci west build -p auto -b mimxrt1050_evk docker exec zephyr-ci west build -p auto -b reel_board }
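########################################################################################
# usage sketch (illustrative)

# A hedged example of how CI is assumed to drive the functions above: a job
# sources this file and then runs the setup/build/test trio for one target,
# e.g. for the standard unix variant:
#
#   source tools/ci.sh
#   ci_unix_standard_build
#   ci_unix_standard_run_tests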
mcauser/micropython
tools/ci.sh
Shell
mit
20,381
alias dcompose='docker-compose' alias docker-vm='screen ~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux/tty'
nop33/dotfiles
docker/aliases.zsh
Shell
mit
135
#!/bin/bash set -o posix ############################################################################## # These are needed for the C++ Project Template sudo apt-get install -qq cmake doxygen g++-4.9 python-pip cppcheck valgrind ggcov sudo pip install Pygments sudo pip install gcovr sudo pip install cpp-coveralls sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.9 90 sudo update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-4.9 80 ############################################################################## ############################################################################## # Add your projects necessary "install" commands here sudo apt-get install -qq graphviz sudo apt-get install -qq libsdl2-dev libsdl2-image-dev libsdl2-mixer-dev libsdl2-ttf-dev sudo apt-get install -qq libx11-dev libx11-xcb-dev xorg-dev ##############################################################################
CaioIcy/sdl2-engine
utils/travis/install.sh
Shell
mit
937
#!/bin/bash set -o errexit set -o pipefail set -o nounset readonly CITIES_TSV=${CITIES_TSV:-"city_extracts.tsv"} readonly COUNTRIES_TSV=${COUNTRIES_TSV:-"country_extracts.tsv"} readonly WORLD_MBTILES=${WORLD_MBTILES:-"world.mbtiles"} readonly EXTRACT_DIR=$(dirname "$WORLD_MBTILES") readonly PATCH_SRC="$EXTRACT_DIR/world_z0-z5.mbtiles" function main() { if [ ! -f "$WORLD_MBTILES" ]; then echo "$WORLD_MBTILES not found." exit 10 fi local upload_flag='--upload' # The :- default below keeps "set -o nounset" from aborting when S3_ACCESS_KEY is not exported if [ -z "${S3_ACCESS_KEY:-}" ]; then upload_flag='' echo 'Skip upload since no S3_ACCESS_KEY was found.' fi # Generate patch sources first but do not upload them python -u create_extracts.py zoom-level "$WORLD_MBTILES" \ --max-zoom=5 --target-dir="$EXTRACT_DIR" python -u create_extracts.py zoom-level "$WORLD_MBTILES" \ --max-zoom=8 --target-dir="$EXTRACT_DIR" python -u create_extracts.py bbox "$WORLD_MBTILES" "$CITIES_TSV" \ --patch-from="$PATCH_SRC" --target-dir="$EXTRACT_DIR" $upload_flag python -u create_extracts.py bbox "$WORLD_MBTILES" "$COUNTRIES_TSV" \ --patch-from="$PATCH_SRC" --target-dir="$EXTRACT_DIR" $upload_flag } main
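# Illustrative usage: every input above is overridable via the environment, so
# a hypothetical local run (file names invented) could look like:
#
#   WORLD_MBTILES=data/world.mbtiles CITIES_TSV=city_extracts.tsv \
#   COUNTRIES_TSV=country_extracts.tsv ./create-extracts.sh
#
# Leaving S3_ACCESS_KEY unset skips the upload step, as handled in main().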
geometalab/osm2vectortiles
src/create-extracts/create-extracts.sh
Shell
mit
1,211
#!/usr/bin/env bash prompt_command() { # Local or SSH session? local remote="" [ -n "$SSH_CLIENT" ] || [ -n "$SSH_TTY" ] && remote=1 # set the user-color local user_color=$COLOR_LIGHT_GREEN # user's color [ $UID -eq 0 ] && user_color=$COLOR_RED # root's color # set the user local user="" if [[ -z $CONFIG_DEFAULT_USER ]] || [[ "$CONFIG_DEFAULT_USER" != "$USER" ]]; then user=$USER fi # set the hostname inside SSH session local host="" [ -n "$remote" ] && host="\[$COLOR_LIGHT_GREEN\]${ICON_FOR_AT}\h" # set extra ":" after user || host local userOrHostExtra="" if [[ -n "$host" ]] || [[ -n "$user" ]]; then userOrHostExtra="\[$user_color\]:" fi local isCygwinMings=false [[ $SYSTEM_TYPE == "CYGWIN" || $SYSTEM_TYPE == "MINGW" ]] && isCygwinMings=true if [[ "$(tty)" == /dev/pts/* ]] || $isCygwinMings; then if [[ -n $remote ]] && [[ $COLORTERM = gnome-* && $TERM = xterm ]] && infocmp gnome-256color >/dev/null 2>&1; then export TERM='gnome-256color' elif infocmp xterm-256color >/dev/null 2>&1; then export TERM='xterm-256color' elif $isCygwinMings ; then export TERM='xterm-256color' fi fi local scm if command -v svn > /dev/null 2>&1; then scm="\$(__svn_branch)" fi # Terminal title local TITLE="" # echo title sequence only for pseudo terminals # real tty do not support title escape sequences. if [[ "$(tty)" == /dev/pts/* ]] || $isCygwinMings; then TITLE="\[\033]0;${USER}@${HOSTNAME}: \w\007\]" fi # INFO: Text (commands) inside \[...\] does not impact line length calculation which fixes stange bug when looking through the history # $? is a status of last command, should be processed every time prompt prints # Format prompt export PS1="${TITLE}\`if [ \$? -eq 0 ]; then echo -e \[\$COLOR_GREEN\]\${ICON_FOR_TRUE}; else echo -e \[\$COLOR_RED\]\${ICON_FOR_FALSE}; fi\` \[$user_color\]${user}${host}${userOrHostExtra}\[$COLOR_LIGHT_BLUE\]\w\[$COLOR_LIGHT_RED\]${ICON_FOR_ARROW_RIGHT}\[$COLOR_LIGHT_PURPLE\]${scm}\[$COLOR_NO_COLOUR\] " # Multiline command export PS2="\[$COLOR_LIGHT_RED\]${ICON_FOR_ARROW_RIGHT}\[$COLOR_NO_COLOUR\]" # Restore the original prompt for select menus. This is unset initially and # seems to default to "#? ". unset PS3; } safe_append_prompt_command prompt_command
voku/dotfiles
.redpill/themes/voku/voku.theme.bash
Shell
mit
2,369
fswatch -o . | xargs -n1 -I{} ./bin/sync_rubyx.sh
ruby-x/rubyx
bin/watch.sh
Shell
mit
50
#!/bin/bash FN="LAPOINTE.db_3.2.3.tar.gz" URLS=( "https://bioconductor.org/packages/3.11/data/annotation/src/contrib/LAPOINTE.db_3.2.3.tar.gz" "https://bioarchive.galaxyproject.org/LAPOINTE.db_3.2.3.tar.gz" "https://depot.galaxyproject.org/software/bioconductor-lapointe.db/bioconductor-lapointe.db_3.2.3_src_all.tar.gz" "https://depot.galaxyproject.org/software/bioconductor-lapointe.db/bioconductor-lapointe.db_3.2.3_src_all.tar.gz" ) MD5="434b25ad7411201d8be6bb1a0463b387" # Use a staging area in the conda dir rather than temp dirs, both to avoid # permission issues as well as to have things downloaded in a predictable # manner. STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM mkdir -p $STAGING TARBALL=$STAGING/$FN SUCCESS=0 for URL in ${URLS[@]}; do curl $URL > $TARBALL [[ $? == 0 ]] || continue # Platform-specific md5sum checks. if [[ $(uname -s) == "Linux" ]]; then if md5sum -c <<<"$MD5 $TARBALL"; then SUCCESS=1 break fi else if [[ $(uname -s) == "Darwin" ]]; then if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then SUCCESS=1 break fi fi fi done if [[ $SUCCESS != 1 ]]; then echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:" printf '%s\n' "${URLS[@]}" exit 1 fi # Install and clean up R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL rm $TARBALL rmdir $STAGING
roryk/recipes
recipes/bioconductor-lapointe.db/post-link.sh
Shell
mit
1,419
#!/bin/bash # This is a stand in script until Armadillo is ready. # Remove roles directory if it exists if hash trash 2>/dev/null; then trash provisioning/roles else rm -rf provisioning/roles fi # Create roles directory mkdir -p provisioning/roles # Clone repos # (Note: Dependencies are currently manually tracked, Armadillo will handle this automatically) git clone --depth 1 --branch v0.1.2 ssh://[email protected]:7999/barc/bootstrap.git provisioning/roles/bootstrap git clone --depth 1 --branch v0.4.3 ssh://[email protected]:7999/barc/core.git provisioning/roles/core git clone --depth 1 --branch v0.2.4 ssh://[email protected]:7999/barc/php.git provisioning/roles/php git clone --depth 1 --branch v0.1.2 ssh://[email protected]:7999/barc/git.git provisioning/roles/git git clone --depth 1 --branch v0.1.4 ssh://[email protected]:7999/barc/composer.git provisioning/roles/composer
antarctica/laravel-token-blacklist
armadillo_standin.sh
Shell
mit
912
#!/usr/bin/env bash set -e set -u UNAME=$(uname) ARCH=$(uname -m) NODE_VERSION=14.17.6 MONGO_VERSION_64BIT=4.4.4 MONGO_VERSION_32BIT=3.2.22 NPM_VERSION=6.14.15 if [ "$UNAME" == "Linux" ] ; then if [ "$ARCH" != "i686" -a "$ARCH" != "x86_64" ] ; then echo "Unsupported architecture: $ARCH" echo "Meteor only supports i686 and x86_64 for now." exit 1 fi OS="linux" stripBinary() { strip --remove-section=.comment --remove-section=.note $1 } elif [ "$UNAME" == "Darwin" ] ; then SYSCTL_64BIT=$(sysctl -n hw.cpu64bit_capable 2>/dev/null || echo 0) if [ "$ARCH" == "i386" -a "1" != "$SYSCTL_64BIT" ] ; then # some older macos returns i386 but can run 64 bit binaries. # Probably should distribute binaries built on these machines, # but it should be OK for users to run. ARCH="x86_64" fi if [ "$ARCH" != "x86_64" ] ; then echo "Unsupported architecture: $ARCH" echo "Meteor only supports x86_64 for now." exit 1 fi OS="macos" # We don't strip on Mac because we don't know a safe command. (Can't strip # too much because we do need node to be able to load objects like # fibers.node.) stripBinary() { true } else echo "This OS not yet supported" exit 1 fi PLATFORM="${UNAME}_${ARCH}" if [ "$UNAME" == "Linux" ] then if [ "$ARCH" == "i686" ] then NODE_TGZ="node-v${NODE_VERSION}-linux-x86.tar.gz" elif [ "$ARCH" == "x86_64" ] then NODE_TGZ="node-v${NODE_VERSION}-linux-x64.tar.gz" else echo "Unknown architecture: $UNAME $ARCH" exit 1 fi elif [ "$UNAME" == "Darwin" ] then NODE_TGZ="node-v${NODE_VERSION}-darwin-x64.tar.gz" else echo "Unknown architecture: $UNAME $ARCH" exit 1 fi SCRIPTS_DIR=$(dirname $0) cd "$SCRIPTS_DIR/.." CHECKOUT_DIR=$(pwd) DIR=$(mktemp -d -t generate-dev-bundle-XXXXXXXX) trap 'rm -rf "$DIR" >/dev/null 2>&1' 0 cd "$DIR" chmod 755 . umask 022 mkdir build cd build
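# Illustrative note: this file is a common prologue, presumably sourced by the
# per-platform build scripts before they add their own steps, roughly:
#
#   source "$(dirname "$0")/build-dev-bundle-common.sh"
#   # ...then download "$NODE_TGZ", build under "$DIR"/build, and call
#   # stripBinary on the resulting binaries (a deliberate no-op on macOS)...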
Hansoft/meteor
npm-packages/eslint-plugin-meteor/scripts/build-dev-bundle-common.sh
Shell
mit
2,030
#!/bin/bash FN="pasilla_1.16.0.tar.gz" URLS=( "https://bioconductor.org/packages/3.11/data/experiment/src/contrib/pasilla_1.16.0.tar.gz" "https://bioarchive.galaxyproject.org/pasilla_1.16.0.tar.gz" "https://depot.galaxyproject.org/software/bioconductor-pasilla/bioconductor-pasilla_1.16.0_src_all.tar.gz" ) MD5="28b3db0cb15f42af3cc0c0d57cfefbca" # Use a staging area in the conda dir rather than temp dirs, both to avoid # permission issues as well as to have things downloaded in a predictable # manner. STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM mkdir -p $STAGING TARBALL=$STAGING/$FN SUCCESS=0 for URL in ${URLS[@]}; do curl $URL > $TARBALL [[ $? == 0 ]] || continue # Platform-specific md5sum checks. if [[ $(uname -s) == "Linux" ]]; then if md5sum -c <<<"$MD5 $TARBALL"; then SUCCESS=1 break fi else if [[ $(uname -s) == "Darwin" ]]; then if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then SUCCESS=1 break fi fi fi done if [[ $SUCCESS != 1 ]]; then echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:" printf '%s\n' "${URLS[@]}" exit 1 fi # Install and clean up R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL rm $TARBALL rmdir $STAGING
roryk/recipes
recipes/bioconductor-pasilla/post-link.sh
Shell
mit
1,287
#! /usr/bin/env zsh dot="$(cd "$(dirname "$0")"; pwd)" source "$dot/radar-base.sh" if is_repo; then autoload colors && colors printf '%s' "%{$fg_bold[black]%} git:(%{$reset_color%}" zsh_color_remote_commits printf '%s' "%{$fg[white]%}" readable_branch_name printf '%s' "%{$reset_color%}" zsh_color_local_commits printf '%s' "%{$fg_bold[black]%})%{$reset_color%}" zsh_color_changes_status fi
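# Illustrative usage: one assumed way to embed this fragment is command
# substitution from ~/.zshrc (path invented); PROMPT_SUBST makes zsh re-run
# the script on every prompt redraw:
#
#   setopt PROMPT_SUBST
#   PROMPT='%~$(/path/to/git-radar/prompt.zsh) '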
fordhurley/git-radar
prompt.zsh
Shell
mit
411
#!/bin/sh cwd=$(pwd) cd .. rm -rf cordova-test-app git clone https://github.com/hypery2k/cordova-demo-app.git cordova-test-app cd cordova-test-app npm install bower install grunt "ci:$PLATFORM" "$PLATFORM" cordova plugin add ../cordova-certificate-plugin/ echo "Changing back to plugin directory: $cwd" cd "$cwd"
danjarvis/cordova-plugin-crosswalk-certificate
runIntegrationTests.sh
Shell
mit
310
# Retrieves the test file from the plain-HTTP sample server (no TLS, so curl's insecure -k flag is not needed) curl http://localhost:2001/samplemessage.txt
matthewmccullough/encryption-jvm-bootcamp
jsse-sample-server-sun/getfilePLAIN.sh
Shell
mit
88
#!/bin/sh chown root:root RECOVERY-PAYLOAD -R cd RECOVERY-PAYLOAD rm -f ../recovery.tar.xz tar -cvJ --xz . > ../recovery.tar.xz stat ../recovery.tar.xz cd ..
lexmar93/Siyah-i777
compress-recovery-payload.sh
Shell
gpl-2.0
159
#! /bin/sh ### Set verbose mode test "x$VERBOSE" = "xx" && set -x # filter # zero by default ../src/seqExclude -o tmp.out $srcdir/test.fa 2> tmp.err || exit 1 diff $srcdir/test.fa tmp.out > /dev/null || exit 1 # zero set ../src/seqExclude -l 0 -o tmp.out $srcdir/test.fa 2> tmp.err || exit 1 diff $srcdir/test.fa tmp.out > /dev/null || exit 1 ../src/seqExclude -l 160 -o tmp.out $srcdir/test.fa 2> tmp.err || exit 1 diff $srcdir/filter_160.out tmp.out > /dev/null || exit 1 diff $srcdir/filter_160.err tmp.err > /dev/null || exit 1 ## Clean rm -f tmp.* exit 0
C3BI-pasteur-fr/seqExclude
tests/filter.sh
Shell
gpl-2.0
571
#!/system/bin/sh # Copyright (c) 2012, The Linux Foundation. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Linux Foundation nor # the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # # restart qmuxd, ril-daemon at restarting fraemworks # volddecrypt=`getprop vold.decrypt` baseband=`getprop ro.baseband` sgltecsfb=`getprop persist.radio.sglte_csfb` netmgr=`getprop ro.use_data_netmgrd` multisimconfig=`getprop persist.radio.multisim.config` multisimslotcnt=`getprop ro.multisim.simslotcount` LOG_TAG="qcom-ril-sh" LOG_NAME="${0}:" loge () { /system/bin/log -t $LOG_TAG -p e "$LOG_NAME $@" } logi () { /system/bin/log -t $LOG_TAG -p i "$LOG_NAME $@" } failed () { loge "$1: exit code $2" exit $2 } logi "reason=$volddecrypt" case "$volddecrypt" in "trigger_restart_framework" | "trigger_restart_min_framework") logi "baseband=$baseband netmgr=$netmgr" case "$baseband" in "apq") logi "stop ril-daemon" setprop ro.radio.noril yes stop ril-daemon esac case "$baseband" in "msm" | "csfb" | "svlte2a" | "mdm" | "sglte" | "sglte2" | "dsda2" | "unknown") logi "start qmuxd" start qmuxd case "$baseband" in "csfb" | "svlte2a" | "sglte" | "sglte2") logi "start qmiproxy" start qmiproxy ;; "dsda2") setprop persist.radio.multisim.config dsda esac esac case "$sgltecsfb" in "true") logi "stop qmiproxy" stop qmiproxy setprop persist.radio.voice.modem.index 0 esac case "$netmgr" in "true") logi "start netmgrd" start netmgrd esac logi "multisim=$multisimconfig" case "$multisimconfig" in "dsds") logi "start ril-daemon1" stop ril-daemon start ril-daemon start ril-daemon1 ;; "dsda") logi "start ril-daemon1" stop ril-daemon start ril-daemon start ril-daemon1 ;; "tsts") logi "start ril-daemon1 ril-daemon2" stop ril-daemon start ril-daemon start ril-daemon1 start ril-daemon2 esac case "$multisimslotcnt" in "2") logi "start ril-daemon1" stop ril-daemon start ril-daemon start ril-daemon1 esac esac
Dm47021/android_kernel_afyonlte
zfiles/ramdisk/zero/init.ril.sh
Shell
gpl-2.0
3,784
#!/bin/bash # Copyright (c) 2012 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Abort on error. set -e # Load common constants and variables. . "$(dirname "$0")/common.sh" usage() { echo "Usage: $PROG image [config]" } main() { # We want to catch all the discrepancies, not just the first one. # So, any time we find one, we set testfail=1 and continue. # When finished we will use testfail to determine our exit value. local testfail=0 if [[ $# -ne 1 ]] && [[ $# -ne 2 ]]; then usage exit 1 fi local image="$1" # Default config location: same name/directory as this script, # with a .config file extension, ie ensure_no_nonrelease_files.config. local configfile="${0%.sh}.config" # Or, maybe a config was provided on the command line. if [[ $# -eq 2 ]]; then configfile="$2" fi # Either way, load test-expectations data from config. . "${configfile}" || return 1 local rootfs=$(make_temp_dir) mount_image_partition_ro "${image}" 3 "${rootfs}" # Pick the right set of test-expectation data to use. local boardvar=$(get_boardvar_from_lsb_release "${rootfs}") eval "release_file_blacklist=(\"\${RELEASE_FILE_BLACKLIST_${boardvar}[@]}\")" for file in "${release_file_blacklist[@]}"; do if [ -e "${rootfs}/${file}" ]; then error "${file} exists in this image!" ls -al "${rootfs}/${file}" testfail=1 fi done # Verify that session_manager isn't configured to pass additional # environment variables or command-line arguments to Chrome. local config_path="${rootfs}/etc/chrome_dev.conf" local matches=$(grep -s "^[^#]" "${config_path}") if [ -n "${matches}" ]; then error "Found commands in ${config_path}:" error "${matches}" testfail=1 fi exit ${testfail} } main "$@"
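# Illustrative sketch: the sourced .config file is assumed to define one
# blacklist array per board, named after the boardvar derived from the
# image's lsb-release data; hypothetical example contents:
#
#   RELEASE_FILE_BLACKLIST_MYBOARD=(
#     "usr/bin/gdb"
#     "usr/local/autotest"
#   )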
u-root/NiChrome
scripts/vboot/bin/ensure_no_nonrelease_files.sh
Shell
gpl-2.0
2,010
#!/bin/bash MONKEEEYS_MOUNT=`find /media/ -type d -name 'monkeeeys' 2>/dev/null` ROOTFS_MOUNT=`find /media/ -type d -name 'rootfs' 2>/dev/null` # Remove whitespaces in directories and file names (-depth renames children before their parents so the paths emitted by find stay valid) find . -depth -name "* *" -type d | rename 's/ /_/g' find . -name "* *" -type f | rename 's/ /_/g' # Copy all files to the FAT32 partition mkdir -p $MONKEEEYS_MOUNT/sf2 rsync --progress -r `find sf2/ -iname '*.sf2'` \ $MONKEEEYS_MOUNT/sf2/ rsync --progress -r zImage \ rpi-firmware/* \ assignments.txt \ synth-config.txt \ manual.pdf \ $MONKEEEYS_MOUNT/ unzip liane.zip -d $MONKEEEYS_MOUNT # Copy the firmware to the EXT4 partition sudo tar xvf rootfs.tar \ -C "$ROOTFS_MOUNT"
bonnaffe/buildroot
board/monkeeeys-pi/addons/prepare_sd_card.sh
Shell
gpl-2.0
723
TCHAIN=/BUILD/X42/prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi- ./android_clean.sh make ARCH=arm CROSS_COMPILE=$TCHAIN ap33_android_defconfig date '+%Y%m%d%H' > .version make ARCH=arm CROSS_COMPILE=$TCHAIN -j4 make ARCH=arm CROSS_COMPILE=$TCHAIN -C drivers/net/wireless/compat-wireless_R5.SP2.03 KLIB=`pwd` KLIB_BUILD=`pwd` -j2
Alex-V2/Alex-V_SE_OneX
android_compile.sh
Shell
gpl-2.0
342
#!/bin/sh . /lib/netifd/netifd-wireless.sh . /lib/netifd/hostapd.sh init_wireless_driver "$@" MP_CONFIG_INT="mesh_retry_timeout mesh_confirm_timeout mesh_holding_timeout mesh_max_peer_links mesh_max_retries mesh_ttl mesh_element_ttl mesh_hwmp_max_preq_retries mesh_path_refresh_time mesh_min_discovery_timeout mesh_hwmp_active_path_timeout mesh_hwmp_preq_min_interval mesh_hwmp_net_diameter_traversal_time mesh_hwmp_rootmode mesh_hwmp_rann_interval mesh_gate_announcements mesh_sync_offset_max_neighor mesh_rssi_threshold mesh_hwmp_active_path_to_root_timeout mesh_hwmp_root_interval mesh_hwmp_confirmation_interval mesh_awake_window mesh_plink_timeout" MP_CONFIG_BOOL="mesh_auto_open_plinks mesh_fwding" MP_CONFIG_STRING="mesh_power_mode" drv_mac80211_init_device_config() { hostapd_common_add_device_config config_add_string path phy 'macaddr:macaddr' config_add_string hwmode config_add_int beacon_int chanbw frag rts config_add_int rxantenna txantenna antenna_gain txpower distance config_add_boolean noscan config_add_array ht_capab config_add_boolean \ rxldpc \ short_gi_80 \ short_gi_160 \ tx_stbc_2by1 \ su_beamformer \ su_beamformee \ mu_beamformer \ mu_beamformee \ vht_txop_ps \ htc_vht \ rx_antenna_pattern \ tx_antenna_pattern config_add_int vht_max_a_mpdu_len_exp vht_max_mpdu vht_link_adapt vht160 rx_stbc tx_stbc } drv_mac80211_init_iface_config() { hostapd_common_add_bss_config config_add_string 'macaddr:macaddr' ifname config_add_boolean wds powersave config_add_int maxassoc config_add_int max_listen_int config_add_int dtim_interval # mesh config_add_string mesh_id config_add_int $MP_CONFIG_INT config_add_boolean $MP_CONFIG_BOOL config_add_string $MP_CONFIG_STRING } mac80211_add_capabilities() { local __var="$1"; shift local __mask="$1"; shift local __out= oifs oifs="$IFS" IFS=: for capab in "$@"; do set -- $capab [ "$(($4))" -gt 0 ] || continue [ "$(($__mask & $2))" -eq "$((${3:-$2}))" ] || continue __out="$__out[$1]" done IFS="$oifs" export -n -- "$__var=$__out" } mac80211_hostapd_setup_base() { local phy="$1" json_select config [ "$auto_channel" -gt 0 ] && channel=acs_survey json_get_vars noscan htmode json_get_values ht_capab_list ht_capab ieee80211n=1 ht_capab= case "$htmode" in VHT20|HT20) ;; HT40*|VHT40|VHT80|VHT160) case "$hwmode" in a) case "$(( ($channel / 4) % 2 ))" in 1) ht_capab="[HT40+]";; 0) ht_capab="[HT40-]";; esac ;; *) case "$htmode" in HT40+) ht_capab="[HT40+]";; HT40-) ht_capab="[HT40-]";; *) if [ "$channel" -lt 7 ]; then ht_capab="[HT40+]" else ht_capab="[HT40-]" fi ;; esac ;; esac [ "$auto_channel" -gt 0 ] && ht_capab="[HT40+]" ;; *) ieee80211n= ;; esac [ -n "$ieee80211n" ] && { append base_cfg "ieee80211n=1" "$N" json_get_vars \ ldpc:1 \ greenfield:1 \ short_gi_20:1 \ short_gi_40:1 \ tx_stbc:1 \ rx_stbc:3 \ dsss_cck_40:1 ht_cap_mask=0 for cap in $(iw phy "$phy" info | grep 'Capabilities:' | cut -d: -f2); do ht_cap_mask="$(($ht_cap_mask | $cap))" done cap_rx_stbc=$((($ht_cap_mask >> 8) & 3)) [ "$rx_stbc" -lt "$cap_rx_stbc" ] && cap_rx_stbc="$rx_stbc" ht_cap_mask="$(( ($ht_cap_mask & ~(0x300)) | ($cap_rx_stbc << 8) ))" mac80211_add_capabilities ht_capab_flags $ht_cap_mask \ LDPC:0x1::$ldpc \ GF:0x10::$greenfield \ SHORT-GI-20:0x20::$short_gi_20 \ SHORT-GI-40:0x40::$short_gi_40 \ TX-STBC:0x80::$tx_stbc \ RX-STBC1:0x300:0x100:1 \ RX-STBC12:0x300:0x200:1 \ RX-STBC123:0x300:0x300:1 \ DSSS_CCK-40:0x1000::$dsss_cck_40 ht_capab="$ht_capab$ht_capab_flags" [ -n "$ht_capab" ] && append base_cfg "ht_capab=$ht_capab" "$N" } # 802.11ac enable_ac=0 idx="$channel" case "$htmode" in 
VHT20) enable_ac=1;; VHT40) case "$(( ($channel / 4) % 2 ))" in 1) idx=$(($channel + 2));; 0) idx=$(($channel - 2));; esac enable_ac=1 append base_cfg "vht_oper_chwidth=0" "$N" append base_cfg "vht_oper_centr_freq_seg0_idx=$idx" "$N" ;; VHT80) case "$(( ($channel / 4) % 4 ))" in 1) idx=$(($channel + 6));; 2) idx=$(($channel + 2));; 3) idx=$(($channel - 2));; 0) idx=$(($channel - 6));; esac enable_ac=1 append base_cfg "vht_oper_chwidth=1" "$N" append base_cfg "vht_oper_centr_freq_seg0_idx=$idx" "$N" ;; VHT160) case "$channel" in 36|40|44|48|52|56|60|64) idx=50;; 100|104|108|112|116|120|124|128) idx=114;; esac enable_ac=1 append base_cfg "vht_oper_chwidth=2" "$N" append base_cfg "vht_oper_centr_freq_seg0_idx=$idx" "$N" ;; esac if [ "$enable_ac" != "0" ]; then json_get_vars \ rxldpc:1 \ short_gi_80:1 \ short_gi_160:1 \ tx_stbc_2by1:1 \ su_beamformer:1 \ su_beamformee:1 \ mu_beamformer:1 \ mu_beamformee:1 \ vht_txop_ps:1 \ htc_vht:1 \ rx_antenna_pattern:1 \ tx_antenna_pattern:1 \ vht_max_a_mpdu_len_exp:7 \ vht_max_mpdu:11454 \ rx_stbc:4 \ tx_stbc:4 \ vht_link_adapt:3 \ vht160:2 append base_cfg "ieee80211ac=1" "$N" vht_cap=0 for cap in $(iw phy "$phy" info | awk -F "[()]" '/VHT Capabilities/ { print $2 }'); do vht_cap="$(($vht_cap | $cap))" done cap_rx_stbc=$((($vht_cap >> 8) & 7)) [ "$rx_stbc" -lt "$cap_rx_stbc" ] && cap_rx_stbc="$rx_stbc" ht_cap_mask="$(( ($vht_cap & ~(0x700)) | ($cap_rx_stbc << 8) ))" mac80211_add_capabilities vht_capab $vht_cap \ RXLDPC:0x10::$rxldpc \ SHORT-GI-80:0x20::$short_gi_80 \ SHORT-GI-160:0x40::$short_gi_160 \ TX-STBC-2BY1:0x80::$tx_stbc \ SU-BEAMFORMER:0x800::$su_beamformer \ SU-BEAMFORMEE:0x1000::$su_beamformee \ MU-BEAMFORMER:0x80000::$mu_beamformer \ MU-BEAMFORMEE:0x100000::$mu_beamformee \ VHT-TXOP-PS:0x200000::$vht_txop_ps \ HTC-VHT:0x400000::$htc_vht \ RX-ANTENNA-PATTERN:0x10000000::$rx_antenna_pattern \ TX-ANTENNA-PATTERN:0x20000000::$tx_antenna_pattern \ RX-STBC1:0x700:0x100:1 \ RX-STBC12:0x700:0x200:1 \ RX-STBC123:0x700:0x300:1 \ RX-STBC1234:0x700:0x400:1 \ # supported Channel widths vht160_hw=0 [ "$(($vht_cap & 12))" -eq 4 -a 1 -le "$vht160" ] && \ vht160_hw=1 [ "$(($vht_cap & 12))" -eq 8 -a 2 -le "$vht160" ] && \ vht160_hw=2 [ "$vht160_hw" = 1 ] && vht_capab="$vht_capab[VHT160]" [ "$vht160_hw" = 2 ] && vht_capab="$vht_capab[VHT160-80PLUS80]" # maximum MPDU length vht_max_mpdu_hw=3895 [ "$(($vht_cap & 3))" -ge 1 -a 7991 -le "$vht_max_mpdu" ] && \ vht_max_mpdu_hw=7991 [ "$(($vht_cap & 3))" -ge 2 -a 11454 -le "$vht_max_mpdu" ] && \ vht_max_mpdu_hw=11454 [ "$vht_max_mpdu_hw" != 3895 ] && \ vht_capab="$vht_capab[MAX-MPDU-$vht_max_mpdu_hw]" # maximum A-MPDU length exponent vht_max_a_mpdu_len_exp_hw=0 [ "$(($vht_cap & 58720256))" -ge 8388608 -a 1 -le "$vht_max_a_mpdu_len_exp" ] && \ vht_max_a_mpdu_len_exp_hw=1 [ "$(($vht_cap & 58720256))" -ge 16777216 -a 2 -le "$vht_max_a_mpdu_len_exp" ] && \ vht_max_a_mpdu_len_exp_hw=2 [ "$(($vht_cap & 58720256))" -ge 25165824 -a 3 -le "$vht_max_a_mpdu_len_exp" ] && \ vht_max_a_mpdu_len_exp_hw=3 [ "$(($vht_cap & 58720256))" -ge 33554432 -a 4 -le "$vht_max_a_mpdu_len_exp" ] && \ vht_max_a_mpdu_len_exp_hw=4 [ "$(($vht_cap & 58720256))" -ge 41943040 -a 5 -le "$vht_max_a_mpdu_len_exp" ] && \ vht_max_a_mpdu_len_exp_hw=5 [ "$(($vht_cap & 58720256))" -ge 50331648 -a 6 -le "$vht_max_a_mpdu_len_exp" ] && \ vht_max_a_mpdu_len_exp_hw=6 [ "$(($vht_cap & 58720256))" -ge 58720256 -a 7 -le "$vht_max_a_mpdu_len_exp" ] && \ vht_max_a_mpdu_len_exp_hw=7 vht_capab="$vht_capab[MAX-A-MPDU-LEN-EXP$vht_max_a_mpdu_len_exp_hw]" # whether or not the 
STA supports link adaptation using VHT variant vht_link_adapt_hw=0 [ "$(($vht_cap & 201326592))" -ge 134217728 -a 2 -le "$vht_link_adapt" ] && \ vht_link_adapt_hw=2 [ "$(($vht_cap & 201326592))" -ge 201326592 -a 3 -le "$vht_link_adapt" ] && \ vht_link_adapt_hw=3 [ "$vht_link_adapt_hw" != 0 ] && \ vht_capab="$vht_capab[VHT-LINK-ADAPT-$vht_link_adapt_hw]" [ -n "$vht_capab" ] && append base_cfg "vht_capab=$vht_capab" "$N" fi hostapd_prepare_device_config "$hostapd_conf_file" nl80211 cat >> "$hostapd_conf_file" <<EOF ${channel:+channel=$channel} ${noscan:+noscan=$noscan} $base_cfg EOF json_select .. } mac80211_hostapd_setup_bss() { local phy="$1" local ifname="$2" local macaddr="$3" local type="$4" hostapd_cfg= append hostapd_cfg "$type=$ifname" "$N" hostapd_set_bss_options hostapd_cfg "$vif" || return 1 json_get_vars wds dtim_period max_listen_int set_default wds 0 [ "$wds" -gt 0 ] && append hostapd_cfg "wds_sta=1" "$N" [ "$staidx" -gt 0 ] && append hostapd_cfg "start_disabled=1" "$N" cat >> /var/run/hostapd-$phy.conf <<EOF $hostapd_cfg bssid=$macaddr ${dtim_period:+dtim_period=$dtim_period} ${max_listen_int:+max_listen_interval=$max_listen_int} EOF } mac80211_generate_mac() { local phy="$1" local id="${macidx:-0}" local ref="$(cat /sys/class/ieee80211/${phy}/macaddress)" local mask="$(cat /sys/class/ieee80211/${phy}/address_mask)" [ "$mask" = "00:00:00:00:00:00" ] && mask="ff:ff:ff:ff:ff:ff"; local oIFS="$IFS"; IFS=":"; set -- $mask; IFS="$oIFS" local mask1=$1 local mask6=$6 local oIFS="$IFS"; IFS=":"; set -- $ref; IFS="$oIFS" macidx=$(($id + 1)) [ "$((0x$mask1))" -gt 0 ] && { b1="0x$1" [ "$id" -gt 0 ] && \ b1=$(($b1 ^ ((($id - 1) << 2) | 0x2))) printf "%02x:%s:%s:%s:%s:%s" $b1 $2 $3 $4 $5 $6 return } [ "$((0x$mask6))" -lt 255 ] && { printf "%s:%s:%s:%s:%s:%02x" $1 $2 $3 $4 $5 $(( 0x$6 ^ $id )) return } off2=$(( (0x$6 + $id) / 0x100 )) printf "%s:%s:%s:%s:%02x:%02x" \ $1 $2 $3 $4 \ $(( (0x$5 + $off2) % 0x100 )) \ $(( (0x$6 + $id) % 0x100 )) } find_phy() { [ -n "$phy" -a -d /sys/class/ieee80211/$phy ] && return 0 [ -n "$path" -a -d "/sys/devices/$path/ieee80211" ] && { phy="$(ls /sys/devices/$path/ieee80211 | grep -m 1 phy)" [ -n "$phy" ] && return 0 } [ -n "$macaddr" ] && { for phy in $(ls /sys/class/ieee80211 2>/dev/null); do grep -i -q "$macaddr" "/sys/class/ieee80211/${phy}/macaddress" && return 0 done } return 1 } mac80211_check_ap() { has_ap=1 } mac80211_prepare_vif() { json_select config json_get_vars ifname mode ssid wds powersave macaddr [ -n "$ifname" ] || ifname="wlan${phy#phy}${if_idx:+-$if_idx}" if_idx=$((${if_idx:-0} + 1)) set_default wds 0 set_default powersave 0 json_select .. 
[ -n "$macaddr" ] || { macaddr="$(mac80211_generate_mac $phy)" macidx="$(($macidx + 1))" } json_add_object data json_add_string ifname "$ifname" json_close_object json_select config # It is far easier to delete and create the desired interface case "$mode" in adhoc) iw phy "$phy" interface add "$ifname" type adhoc ;; ap) # Hostapd will handle recreating the interface and # subsequent virtual APs belonging to the same PHY if [ -n "$hostapd_ctrl" ]; then type=bss else type=interface fi mac80211_hostapd_setup_bss "$phy" "$ifname" "$macaddr" "$type" || return [ -n "$hostapd_ctrl" ] || { iw phy "$phy" interface add "$ifname" type managed hostapd_ctrl="${hostapd_ctrl:-/var/run/hostapd/$ifname}" } ;; mesh) json_get_vars key mesh_id if [ -n "$key" ]; then iw phy "$phy" interface add "$ifname" type mp else iw phy "$phy" interface add "$ifname" type mp mesh_id "$mesh_id" fi ;; monitor) iw phy "$phy" interface add "$ifname" type monitor ;; sta) local wdsflag= staidx="$(($staidx + 1))" [ "$wds" -gt 0 ] && wdsflag="4addr on" iw phy "$phy" interface add "$ifname" type managed $wdsflag [ "$powersave" -gt 0 ] && powersave="on" || powersave="off" iw "$ifname" set power_save "$powersave" ;; esac case "$mode" in monitor|mesh) [ "$auto_channel" -gt 0 ] || iw dev "$ifname" set channel "$channel" $htmode ;; esac if [ "$mode" != "ap" ]; then # ALL ap functionality will be passed to hostapd # All interfaces must have unique mac addresses # which can either be explicitly set in the device # section, or automatically generated ifconfig "$ifname" hw ether "$macaddr" fi json_select .. } mac80211_setup_supplicant() { wpa_supplicant_prepare_interface "$ifname" nl80211 || return 1 wpa_supplicant_add_network "$ifname" wpa_supplicant_run "$ifname" ${hostapd_ctrl:+-H $hostapd_ctrl} } mac80211_setup_adhoc() { json_get_vars bssid ssid key mcast_rate keyspec= [ "$auth_type" == "wep" ] && { set_default key 1 case "$key" in [1234]) local idx for idx in 1 2 3 4; do json_get_var ikey "key$idx" [ -n "$ikey" ] && { ikey="$(($idx - 1)):$(prepare_key_wep "$ikey")" [ $idx -eq $key ] && ikey="d:$ikey" append keyspec "$ikey" } done ;; *) append keyspec "d:0:$(prepare_key_wep "$key")" ;; esac } brstr= for br in $basic_rate_list; do hostapd_add_rate brstr "$br" done mcval= [ -n "$mcast_rate" ] && hostapd_add_rate mcval "$mcast_rate" iw dev "$ifname" ibss join "$ssid" $freq $htmode fixed-freq $bssid \ ${beacon_int:+beacon-interval $beacon_int} \ ${brstr:+basic-rates $brstr} \ ${mcval:+mcast-rate $mcval} \ ${keyspec:+keys $keyspec} } mac80211_setup_vif() { local name="$1" local failed json_select data json_get_vars ifname json_select .. json_select config json_get_vars mode json_get_var vif_txpower txpower ifconfig "$ifname" up || { wireless_setup_vif_failed IFUP_ERROR json_select .. return } set_default vif_txpower "$txpower" [ -z "$vif_txpower" ] || iw dev "$ifname" set txpower fixed "${vif_txpower%%.*}00" case "$mode" in mesh) for var in $MP_CONFIG_INT $MP_CONFIG_BOOL $MP_CONFIG_STRING; do json_get_var mp_val "$var" [ -n "$mp_val" ] && iw dev "$ifname" set mesh_param "$var" "$mp_val" done # authsae json_get_vars key if [ -n "$key" ]; then if [ -e "/lib/wifi/authsae.sh" ]; then . /lib/wifi/authsae.sh authsae_start_interface || failed=1 else wireless_setup_vif_failed AUTHSAE_NOT_INSTALLED json_select .. return fi fi ;; adhoc) wireless_vif_parse_encryption if [ "$wpa" -gt 0 -o "$auto_channel" -gt 0 ]; then mac80211_setup_supplicant || failed=1 else mac80211_setup_adhoc fi ;; sta) mac80211_setup_supplicant || failed=1 ;; esac json_select .. 
[ -n "$failed" ] || wireless_add_vif "$name" "$ifname" } get_freq() { local phy="$1" local chan="$2" iw "$phy" info | grep -E -m1 "(\* ${chan:-....} MHz${chan:+|\\[$chan\\]})" | grep MHz | awk '{print $2}' } mac80211_interface_cleanup() { local phy="$1" for wdev in $(list_phy_interfaces "$phy"); do ifconfig "$wdev" down 2>/dev/null iw dev "$wdev" del done } drv_mac80211_cleanup() { hostapd_common_cleanup } drv_mac80211_setup() { json_select config json_get_vars \ phy macaddr path \ country chanbw distance \ txpower antenna_gain \ rxantenna txantenna \ frag rts beacon_int json_get_values basic_rate_list basic_rate json_select .. find_phy || { echo "Could not find PHY for device '$1'" wireless_set_retry 0 return 1 } wireless_set_data phy="$phy" mac80211_interface_cleanup "$phy" # convert channel to frequency [ "$auto_channel" -gt 0 ] || freq="$(get_freq "$phy" "$channel")" [ -n "$country" ] && { iw reg get | grep -q "^country $country:" || { iw reg set "$country" sleep 1 } } hostapd_conf_file="/var/run/hostapd-$phy.conf" no_ap=1 macidx=0 staidx=0 [ -n "$chanbw" ] && { for file in /sys/kernel/debug/ieee80211/$phy/ath9k/chanbw /sys/kernel/debug/ieee80211/$phy/ath5k/bwmode; do [ -f "$file" ] && echo "$chanbw" > "$file" done } set_default rxantenna all set_default txantenna all set_default distance 0 set_default antenna_gain 0 iw phy "$phy" set antenna $txantenna $rxantenna >/dev/null 2>&1 iw phy "$phy" set antenna_gain $antenna_gain iw phy "$phy" set distance "$distance" [ -n "$frag" ] && iw phy "$phy" set frag "${frag%%.*}" [ -n "$rts" ] && iw phy "$phy" set rts "${rts%%.*}" has_ap= hostapd_ctrl= for_each_interface "ap" mac80211_check_ap rm -f "$hostapd_conf_file" [ -n "$has_ap" ] && mac80211_hostapd_setup_base "$phy" for_each_interface "sta adhoc mesh monitor" mac80211_prepare_vif for_each_interface "ap" mac80211_prepare_vif [ -n "$hostapd_ctrl" ] && { /usr/sbin/hostapd -P /var/run/wifi-$phy.pid -B "$hostapd_conf_file" ret="$?" wireless_add_process "$(cat /var/run/wifi-$phy.pid)" "/usr/sbin/hostapd" 1 [ "$ret" != 0 ] && { wireless_setup_failed HOSTAPD_START_FAILED return } } for_each_interface "ap sta adhoc mesh monitor" mac80211_setup_vif wireless_set_up } list_phy_interfaces() { local phy="$1" if [ -d "/sys/class/ieee80211/${phy}/device/net" ]; then ls "/sys/class/ieee80211/${phy}/device/net" 2>/dev/null; else ls "/sys/class/ieee80211/${phy}/device" 2>/dev/null | grep net: | sed -e 's,net:,,g' fi } drv_mac80211_teardown() { wireless_process_kill_all json_select data json_get_vars phy json_select .. mac80211_interface_cleanup "$phy" } add_driver mac80211
tianfy/openwrt-adkill
package/kernel/mac80211/files/lib/netifd/wireless/mac80211.sh
Shell
gpl-2.0
17,176
#!/bin/bash
# Copyright (C) Martin Schlemmer <[email protected]>
# Copyright (C) 2006 Sam Ravnborg <[email protected]>
#
# Released under the terms of the GNU GPL
#
# Generate a cpio packed initramfs. It uses gen_init_cpio to generate
# the cpio archive, and then compresses it.
# The script may also be used to generate the inputfile used for gen_init_cpio
# This script assumes that gen_init_cpio is located in usr/ directory

# error out on errors
set -e

usage() {
cat << EOF
Usage:
$0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
	-o <file>      Create compressed initramfs file named <file> using
	               gen_init_cpio and compressor depending on the extension
	-u <uid>       User ID to map to user ID 0 (root).
	               <uid> is only meaningful if <cpio_source> is a
	               directory. "squash" forces all files to uid 0.
	-g <gid>       Group ID to map to group ID 0 (root).
	               <gid> is only meaningful if <cpio_source> is a
	               directory. "squash" forces all files to gid 0.
	<cpio_source>  File list or directory for cpio archive.
	               If <cpio_source> is a .cpio file it will be used
	               as direct input to initramfs.
	-d             Output the default cpio list.

All options except -o and -l may be repeated and are interpreted
sequentially and immediately. -u and -g states are preserved across
<cpio_source> options so an explicit "-u 0 -g 0" is required
to reset the root/group mapping.
EOF
}

# awk style field access
# $1 - field number; rest is argument string
field() {
	shift $1 ; echo $1
}

list_default_initramfs() {
	# echo usr/kinit/kinit
	:
}

default_initramfs() {
	cat <<-EOF >> ${output}
	# This is a very simple, default initramfs

	dir /dev 0755 0 0
	nod /dev/console 0600 0 0 c 5 1
	dir /root 0700 0 0
	# file /kinit usr/kinit/kinit 0755 0 0
	# slink /init kinit 0755 0 0
	EOF
}

filetype() {
	local argv1="$1"

	# symlink test must come before file test
	if [ -L "${argv1}" ]; then
		echo "slink"
	elif [ -f "${argv1}" ]; then
		echo "file"
	elif [ -d "${argv1}" ]; then
		echo "dir"
	elif [ -b "${argv1}" -o -c "${argv1}" ]; then
		echo "nod"
	elif [ -p "${argv1}" ]; then
		echo "pipe"
	elif [ -S "${argv1}" ]; then
		echo "sock"
	else
		echo "invalid"
	fi
	return 0
}

list_print_mtime() {
	:
}

print_mtime() {
	local my_mtime="0"

	if [ -e "$1" ]; then
		my_mtime=$(find "$1" -printf "%T@\n" | sort -r | head -n 1)
	fi

	echo "# Last modified: ${my_mtime}" >> ${output}
	echo "" >> ${output}
}

list_parse() {
	[ ! -L "$1" ] && echo "$1 \\" || :
}

# for each file print a line in following format
# <filetype> <name> <path to file> <octal mode> <uid> <gid>
# for links, devices etc the format differs. See gen_init_cpio for details
parse() {
	local location="$1"
	local name="${location/${srcdir}//}"
	# change '//' into '/'
	name="${name//\/\///}"
	local mode="$2"
	local uid="$3"
	local gid="$4"
	local ftype=$(filetype "${location}")
	# remap uid/gid to 0 if necessary
	[ "$root_uid" = "squash" ] && uid=0 || [ "$uid" -eq "$root_uid" ] && uid=0
	[ "$root_gid" = "squash" ] && gid=0 || [ "$gid" -eq "$root_gid" ] && gid=0
	local str="${mode} ${uid} ${gid}"

	[ "${ftype}" == "invalid" ] && return 0
	[ "${location}" == "${srcdir}" ] && return 0

	case "${ftype}" in
		"file")
			str="${ftype} ${name} ${location} ${str}"
			;;
		"nod")
			local dev=`LC_ALL=C ls -l "${location}"`
			local maj=`field 5 ${dev}`
			local min=`field 6 ${dev}`
			maj=${maj%,}

			[ -b "${location}" ] && dev="b" || dev="c"

			str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
			;;
		"slink")
			local target=`readlink "${location}"`
			str="${ftype} ${name} ${target} ${str}"
			;;
		*)
			str="${ftype} ${name} ${str}"
			;;
	esac

	echo "${str}" >> ${output}

	return 0
}

unknown_option() {
	printf "ERROR: unknown option \"$arg\"\n" >&2
	printf "If the filename validly begins with '-', " >&2
	printf "then it must be prefixed\n" >&2
	printf "by './' so that it won't be interpreted as an option." >&2
	printf "\n" >&2
	usage >&2
	exit 1
}

list_header() {
	:
}

header() {
	printf "\n#####################\n# $1\n" >> ${output}
}

# process one directory (incl sub-directories)
dir_filelist() {
	${dep_list}header "$1"

	srcdir=$(echo "$1" | sed -e 's://*:/:g')
	dirlist=$(find "${srcdir}" -printf "%p %m %U %G\n")

	# If $dirlist is only one line, then the directory is empty
	if [ "$(echo "${dirlist}" | wc -l)" -gt 1 ]; then
		${dep_list}print_mtime "$1"

		echo "${dirlist}" | \
		while read x; do
			${dep_list}parse ${x}
		done
	fi
}

# if only one file is specified and it is .cpio file then use it directly as fs
# if a directory is specified then add all files in given directory to fs
# if a regular file is specified assume it is in gen_initramfs format
input_file() {
	source="$1"
	if [ -f "$1" ]; then
		${dep_list}header "$1"
		is_cpio="$(echo "$1" | sed 's/^.*\.cpio\(\..*\)\?/cpio/')"
		if [ $2 -eq 0 -a ${is_cpio} == "cpio" ]; then
			cpio_file=$1
			echo "$1" | grep -q '^.*\.cpio\..*' && is_cpio_compressed="compressed"
			[ ! -z ${dep_list} ] && echo "$1"
			return 0
		fi
		if [ -z ${dep_list} ]; then
			print_mtime "$1" >> ${output}
			cat "$1"         >> ${output}
		else
			cat "$1" | while read type dir file perm ; do
				if [ "$type" == "file" ]; then
					echo "$file \\";
				fi
			done
		fi
	elif [ -d "$1" ]; then
		dir_filelist "$1"
	else
		echo "  ${prog}: Cannot open '$1'" >&2
		exit 1
	fi
}

prog=$0
root_uid=0
root_gid=0
dep_list=
cpio_file=
cpio_list=
output="/dev/stdout"
output_file=""
is_cpio_compressed=
compr="gzip -9 -f"

arg="$1"
case "$arg" in
	"-l")	# files included in initramfs - used by kbuild
		dep_list="list_"
		echo "deps_initramfs := \\"
		shift
		;;
	"-o")	# generate compressed cpio image named $1
		shift
		output_file="$1"
		cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
		output=${cpio_list}
		echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
		echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
		echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
		echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
		echo "$output_file" | grep -q "\.cpio$" && compr="cat"
		shift
		;;
esac
while [ $# -gt 0 ]; do
	arg="$1"
	shift
	case "$arg" in
		"-u")	# map $1 to uid=0 (root)
			root_uid="$1"
			shift
			;;
		"-g")	# map $1 to gid=0 (root)
			root_gid="$1"
			shift
			;;
		"-d")	# display default initramfs list
			default_list="$arg"
			${dep_list}default_initramfs
			;;
		"-h")
			usage
			exit 0
			;;
		*)
			case "$arg" in
				"-"*)
					unknown_option
					;;
				*)	# input file/dir - process it
					input_file "$arg" "$#"
					;;
			esac
			;;
	esac
done

# If output_file is set we will generate cpio archive and compress it
# we are careful to delete tmp files
if [ ! -z ${output_file} ]; then
	if [ -z ${cpio_file} ]; then
		cpio_tfile="$(mktemp ${TMPDIR:-/tmp}/cpiofile.XXXXXX)"
		usr/gen_init_cpio ${cpio_list} > ${cpio_tfile}
	else
		cpio_tfile=${cpio_file}
	fi
	rm ${cpio_list}
	if [ "${is_cpio_compressed}" = "compressed" ]; then
		cat ${cpio_tfile} > ${output_file}
	else
		(cat ${cpio_tfile} | ${compr} - > ${output_file}) \
		|| (rm -f ${output_file} ; false)
	fi
	[ -z ${cpio_file} ] && rm ${cpio_tfile}
fi
exit 0
alfsamsung/semc-kernel-msm7x27-ics
scripts/gen_initramfs_list.sh
Shell
gpl-2.0
7,221
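# Usage sketch for gen_initramfs_list.sh above; the rootfs path and the
# uid/gid values are illustrative assumptions, not taken from the repo.
# The flags follow the script's own usage() text.

# Build a gzip-compressed initramfs from a directory, remapping the
# build user's uid/gid 1000 to root:
sh scripts/gen_initramfs_list.sh -o initramfs.img.gz -u 1000 -g 1000 ./rootfs

# Emit only the gen_init_cpio input list (default entries plus a directory):
sh scripts/gen_initramfs_list.sh -d ./rootfs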
#!/system/bin/sh
# Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of The Linux Foundation nor
#       the names of its contributors may be used to endorse or promote
#       products derived from this software without specific prior written
#       permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

export PATH=/system/bin

target=`getprop ro.board.platform`

# Set platform variables
soc_hwplatform=`cat /sys/devices/system/soc/soc0/hw_platform` 2> /dev/null
soc_hwid=`cat /sys/devices/system/soc/soc0/id` 2> /dev/null
soc_hwver=`cat /sys/devices/system/soc/soc0/platform_version` 2> /dev/null

log -t BOOT -p i "MSM target '$target', SoC '$soc_hwplatform', HwID '$soc_hwid', SoC ver '$soc_hwver'"

case "$target" in
    "msm7630_surf" | "msm7630_1x" | "msm7630_fusion")
        case "$soc_hwplatform" in
            "FFA" | "SVLTE_FFA")
                # linking to surf_keypad_qwerty.kcm.bin instead of surf_keypad_numeric.kcm.bin so that
                # the UI keyboard works fine.
                ln -s /system/usr/keychars/surf_keypad_qwerty.kcm.bin /system/usr/keychars/surf_keypad.kcm.bin
                ;;
            "Fluid")
                setprop ro.sf.lcd_density 240
                setprop qcom.bt.dev_power_class 2
                ;;
            *)
                ln -s /system/usr/keychars/surf_keypad_qwerty.kcm.bin /system/usr/keychars/surf_keypad.kcm.bin
                ;;
        esac
        ;;
    "msm8660")
        case "$soc_hwplatform" in
            "Fluid")
                setprop ro.sf.lcd_density 240
                ;;
            "Dragon")
                setprop ro.sound.alsa "WM8903"
                ;;
        esac
        ;;
    "msm8960")
        # lcd density is write-once. Hence the separate switch case
        case "$soc_hwplatform" in
            "Liquid")
                if [ "$soc_hwver" == "196608" ]; then # version 0x30000 is 3D sku
                    setprop ro.sf.hwrotation 90
                fi
                setprop ro.sf.lcd_density 160
                ;;
            "MTP")
                setprop ro.sf.lcd_density 240
                ;;
            *)
                case "$soc_hwid" in
                    "142") #8x30 QRD
                        setprop ro.sf.lcd_density 320
                        ;;
                    "109")
                        setprop ro.sf.lcd_density 160
                        ;;
                    *)
                        setprop ro.sf.lcd_density 240
                        ;;
                esac
                ;;
        esac

        #Set up MSM-specific configuration
        case "$soc_hwid" in
            153 | 154 | 155 | 156 | 157 | 138 | 179 | 180 | 181)
                #8064 V2 PRIME | 8930AB | 8630AB | 8230AB | 8030AB | 8960AB | 8130/AA/AB
                setprop debug.composition.type c2d
                ;;
            *)
                ;;
        esac
        case "$soc_hwid" in
            87 | 116 | 117 | 118 | 119 | 138 | 142 | 143 | 144 | 154 | 155 | 156 | 157 | 179 | 180 | 181)
                #Disable subsystem restart for 8x30 and 8960
                setprop persist.sys.ssr.restart_level 1
                ;;
            *)
                ;;
        esac
        ;;
esac

# Setup HDMI related nodes & permissions
# HDMI can be fb1 or fb2
# Loop through the sysfs nodes and determine
# the HDMI(dtv panel)
fb_cnt=0
for file in /sys/class/graphics/fb*
do
    value=`cat $file/msm_fb_type`
    case "$value" in
        "dtv panel")
            chown system.graphics $file/hpd
            chown system.graphics $file/vendor_name
            chown system.graphics $file/product_description
            chmod 0664 $file/hpd
            chmod 0664 $file/vendor_name
            chmod 0664 $file/product_description
            chmod 0664 $file/video_mode
            chmod 0664 $file/format_3d
            # create symbolic link
            ln -s "/dev/graphics/fb"$fb_cnt /dev/graphics/hdmi
            # Change owner and group for media server and surface flinger
            chown system.system $file/format_3d;;
    esac
    fb_cnt=$(( $fb_cnt + 1))
done

# Set date to a time after 2008
# This is a workaround for Zygote to preload time related classes properly
date -s 20090102.130000
ReflexBow/ghost
ramdisk/init.qcom.early_boot.sh
Shell
gpl-2.0
5,514
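# Stand-alone sketch of the panel-detection loop used above, runnable from an
# adb shell. The sysfs layout is an assumption (older msm_fb targets); nodes
# without msm_fb_type simply print nothing.
for fb in /sys/class/graphics/fb*; do
    if [ -r "$fb/msm_fb_type" ]; then
        echo "$fb: `cat $fb/msm_fb_type`"
    fi
done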
#!/bin/sh
# For unwritable directory 'd', 'rmdir -p d d/e/f' would emit
# diagnostics but would not fail.  Fixed in 5.1.2.

# Copyright (C) 2004-2017 Free Software Foundation, Inc.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ rmdir

mkdir d d/e d/e/f || framework_failure_
chmod a-w d || framework_failure_

# This rmdir command outputs two diagnostics.
# Before coreutils-5.1.2, it would mistakenly exit successfully.
# As of coreutils-5.1.2, it fails, as required.
returns_ 1 rmdir -p d d/e/f 2> /dev/null || fail=1

Exit $fail
adtools/coreutils
tests/rmdir/fail-perm.sh
Shell
gpl-3.0
1,182
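# The behavior exercised by the test above, outside the coreutils harness.
# Assumes coreutils 5.1.2 or later; run in an empty scratch directory.
mkdir -p d/e/f
chmod a-w d
rmdir -p d d/e/f 2>/dev/null   # 'd' is unwritable, so removal must fail
echo $?                        # expect 1
chmod u+w d                    # allow cleanup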
echo `echo hello echo world`
legionus/shell_parser
t/data/posix_0028.sh
Shell
gpl-3.0
29
#!/bin/bash clear echo -e "Traktor Debian Uninstaller v1.0\nTraktor will be automatically Removed with Configured…\n\n" sudo apt purge -y \ tor \ obfs4proxy \ privoxy \ dnscrypt-proxy \ torbrowser-launcher \ apt-transport-tor sudo apt autoremove -y sudo rm -rf /etc/apt/sources.list.d/tor.list sudo apt-key del A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89 sudo sed -i '/dns=none/d' /etc/NetworkManager/NetworkManager.conf gsettings set org.gnome.system.proxy mode 'none' gsettings set org.gnome.system.proxy.http host '' gsettings set org.gnome.system.proxy.http port 0 gsettings set org.gnome.system.proxy.socks host '' gsettings set org.gnome.system.proxy.socks port 0 gsettings set org.gnome.system.proxy ignore-hosts "['localhost', '127.0.0.0/8', '::1']"
ubuntu-ir/traktor
uninstall_debian.sh
Shell
gpl-3.0
770
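# Quick post-uninstall check that the GNOME proxy keys touched above were
# actually reset (a sketch; the expected values are shown in comments):
gsettings get org.gnome.system.proxy mode         # 'none'
gsettings get org.gnome.system.proxy.socks host   # ''
gsettings get org.gnome.system.proxy.socks port   # 0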
#! /bin/bash #exec 1> log.txt PYTHONPATH=.:$PYTHONPATH python ~/work/OIST/git/neuromac/Admin.py 16 demo_distant_attraction.cfg & PYTHONPATH=.:$PYTHONPATH python ~/work/OIST/git/neuromac/Admin.py 8 to_pia.cfg &
russellijarvis/neuromac
examples/simultaneous_runs/test.sh
Shell
gpl-3.0
215
#!/bin/sh
### autogen.sh
#### (based on the hints from Stackoverflow ;-)
###
# Use this script to bootstrap your build AFTER checking it out from
# source control. You should not have to use it for anything else.

echo "Regenerating autotools files"
autoreconf --install || exit 1

echo "Now run ./configure, make, and make install."
mki1967/et-edit
autogen.sh
Shell
gpl-3.0
335
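# Typical bootstrap flow around the autogen.sh above; the install prefix is
# an illustrative assumption, not a project requirement.
./autogen.sh
./configure --prefix="$HOME/.local"
make
make install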
qmake QtOpenSMOKE_PostProcessor.QT5.pro
make clean
make
acuoci/OpenSMOKEppPostProcessor
projects/Mac/compile_QT5.sh
Shell
gpl-3.0
57
#!/bin/bash

pushd foreman
echo "gem 'foreman_docker', :git => 'https://${GIT_HOSTNAME}/${GIT_ORGANIZATION}/foreman_docker.git', :branch => '${gitlabTargetBranch}'" >> bundler.d/Gemfile.local.rb
popd
kbidarkar/robottelo-ci
scripts/add_foreman_docker_gitlab.sh
Shell
gpl-3.0
208
#!/bin/bash

set -e

pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"

. pegasus-lite-common.sh

pegasus_lite_init

# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT

echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir

echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package

echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-generalinfo_0-1.0

echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::generalinfo_0:1.0 -N ID0000002 -R condorpool -L example_workflow -T 2016-11-01T06:05:01+00:00 ./example_workflow-generalinfo_0-1.0
job_ec=$?
set -e
elainenaomi/sciwonc-dataflow-examples
dissertation2017/Experiment 1A/logs/w-03/20161101T060502+0000/00/00/generalinfo_0_ID0000002.sh
Shell
gpl-3.0
1,243
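# Generic sketch of the signal/exit-trap pattern used above: INT/TERM and
# normal exit are routed through named handlers so the work directory is
# always cleaned up. The handler bodies here are illustrative assumptions;
# PegasusLite ships its own in pegasus-lite-common.sh.
work_dir=$(mktemp -d)
cleanup() { rm -rf "$work_dir"; }
on_int()  { echo "interrupted" 1>&2; exit 130; }
on_term() { echo "terminated" 1>&2; exit 143; }
trap on_int INT
trap on_term TERM
trap cleanup EXIT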
#!/bin/bash

if which pip3>/dev/null 2>&1 ;
then
	echo 0 > ~/install-exit-status
else
	echo "ERROR: Python pip3 is not found on the system! This test profile needs Python pip3 to proceed."
	echo 2 > ~/install-exit-status
fi

pip3 install --user selenium

unzip -o selenium-scripts-6.zip

# Drivers
tar -xf geckodriver-v0.24.0-linux64.tar.gz
unzip -o chromedriver_linux64_v77.zip

# Script
echo "#!/bin/bash
rm -f run-benchmark.py
cp -f selenium-run-\$1.py run-benchmark.py
sed -i \"s/Firefox/\$2/g\" run-benchmark.py

echo \"from selenium import webdriver

driver = webdriver.\$2()
if \\\"browserName\\\" in driver.capabilities:
    browserName = driver.capabilities['browserName']
if \\\"browserVersion\\\" in driver.capabilities:
    browserVersion = driver.capabilities['browserVersion']
else:
    browserVersion = driver.capabilities['version']
print('{0} {1}'.format(browserName, browserVersion))
driver.quit()\" > browser-version.py

PATH=\$HOME:\$PATH python3 ./run-benchmark.py > \$LOG_FILE 2>&1
echo \$? > ~/test-exit-status
PATH=\$HOME:\$PATH python3 ./browser-version.py > ~/pts-footnote
" > selenium
chmod +x selenium
phoronix-test-suite/phoronix-test-suite
ob-cache/test-profiles/system/selenium-1.0.9/install.sh
Shell
gpl-3.0
1,124
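# Invocation sketch for the generated 'selenium' wrapper above: $1 selects a
# selenium-run-<name>.py script and $2 names the WebDriver class. The
# benchmark name and log path below are illustrative assumptions.
LOG_FILE=/tmp/selenium.log ./selenium octane Chrome
LOG_FILE=/tmp/selenium.log ./selenium octane Firefox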
#### This script is meant to be sourced by ltconfig. # ltcf-c.sh - Create a C compiler specific configuration # # Copyright (C) 1996-2000, 2001 Free Software Foundation, Inc. # Originally by Gordon Matzigkeit <[email protected]>, 1996 # # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='main(){return(0);}' ## Linker Characteristics case $host_os in cygwin* | mingw*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test "$with_gcc" != yes; then with_gnu_ld=no fi ;; esac ld_shlibs=yes if test "$with_gnu_ld" = yes; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='${wl}' # See if GNU ld supports shared libraries. case $host_os in aix3* | aix4* | aix5*) # On AIX/PPC, the GNU linker is very broken if test "$host_cpu" != ia64; then ld_shlibs=no cat <<EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.9.1, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to modify your PATH *** so that a non-GNU linker is found, and then restart. EOF fi ;; amigaos*) archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes # Samuel A. Falvo II <[email protected]> reports # that the semantics of dynamic libraries on AmigaOS, at least up # to version 4, is to share data among multiple programs linked # with the same dynamic library. Since this doesn't match the # behavior of shared libraries on other platforms, we can use # them. ld_shlibs=no ;; beos*) if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then allow_undefined_flag=unsupported # Joseph Beckenbach <[email protected]> says some releases of gcc # support --undefined. This deserves some investigation. 
FIXME archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' else ld_shlibs=no fi ;; cygwin* | mingw*) # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec='-L$libdir' allow_undefined_flag=unsupported always_export_symbols=yes extract_expsyms_cmds='test -f $output_objdir/impgen.c || \ sed -e "/^# \/\* impgen\.c starts here \*\//,/^# \/\* impgen.c ends here \*\// { s/^# //; p; }" -e d < $0 > $output_objdir/impgen.c~ test -f $output_objdir/impgen.exe || (cd $output_objdir && \ if test "x$BUILD_CC" != "x" ; then $BUILD_CC -o impgen impgen.c ; \ else $CC -o impgen impgen.c ; fi)~ $output_objdir/impgen $dir/$soroot > $output_objdir/$soname-def' old_archive_from_expsyms_cmds='$DLLTOOL --as=$AS --dllname $soname --def $output_objdir/$soname-def --output-lib $output_objdir/$newlib' # cygwin and mingw dlls have different entry points and sets of symbols # to exclude. # FIXME: what about values for MSVC? dll_entry=__cygwin_dll_entry@12 dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12~ case $host_os in mingw*) # mingw values dll_entry=_DllMainCRTStartup@12 dll_exclude_symbols=DllMain@12,DllMainCRTStartup@12,DllEntryPoint@12~ ;; esac # mingw and cygwin differ, and it's simplest to just exclude the union # of the two symbol sets. dll_exclude_symbols=DllMain@12,_cygwin_dll_entry@12,_cygwin_noncygwin_dll_entry@12,DllMainCRTStartup@12,DllEntryPoint@12 # recent cygwin and mingw systems supply a stub DllMain which the user # can override, but on older systems we have to supply one (in ltdll.c) if test "x$lt_cv_need_dllmain" = "xyes"; then ltdll_obj='$output_objdir/$soname-ltdll.'"$objext " ltdll_cmds='test -f $output_objdir/$soname-ltdll.c || sed -e "/^# \/\* ltdll\.c starts here \*\//,/^# \/\* ltdll.c ends here \*\// { s/^# //; p; }" -e d < $0 > $output_objdir/$soname-ltdll.c~ test -f $output_objdir/$soname-ltdll.$objext || (cd $output_objdir && $CC -c $soname-ltdll.c)~' else ltdll_obj= ltdll_cmds= fi # Extract the symbol export list from an `--export-all' def file, # then regenerate the def file from the symbol export list, so that # the compiled dll only exports the symbol export list. # Be careful not to strip the DATA tag left be newer dlltools. export_symbols_cmds="$ltdll_cmds"' $DLLTOOL --export-all --exclude-symbols '$dll_exclude_symbols' --output-def $output_objdir/$soname-def '$ltdll_obj'$libobjs $convenience~ sed -e "1,/EXPORTS/d" -e "s/ @ [0-9]*//" -e "s/ *;.*$//" < $output_objdir/$soname-def > $export_symbols' # If the export-symbols file already is a .def file (1st line # is EXPORTS), use it as is. # If DATA tags from a recent dlltool are present, honour them! 
archive_expsym_cmds='if test "x`head -1 $export_symbols`" = xEXPORTS; then cp $export_symbols $output_objdir/$soname-def; else echo EXPORTS > $output_objdir/$soname-def; _lt_hint=1; cat $export_symbols | while read symbol; do set dummy \$symbol; case \[$]# in 2) echo " \[$]2 @ \$_lt_hint ; " >> $output_objdir/$soname-def;; *) echo " \[$]2 @ \$_lt_hint \[$]3 ; " >> $output_objdir/$soname-def;; esac; _lt_hint=`expr 1 + \$_lt_hint`; done; fi~ '"$ltdll_cmds"' $CC -Wl,--base-file,$output_objdir/$soname-base '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp~ $CC -Wl,--base-file,$output_objdir/$soname-base $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags~ $DLLTOOL --as=$AS --dllname $soname --exclude-symbols '$dll_exclude_symbols' --def $output_objdir/$soname-def --base-file $output_objdir/$soname-base --output-exp $output_objdir/$soname-exp --output-lib $output_objdir/$libname.dll.a~ $CC $output_objdir/$soname-exp '$lt_cv_cc_dll_switch' -Wl,-e,'$dll_entry' -o $output_objdir/$soname '$ltdll_obj'$libobjs $deplibs $compiler_flags' ;; netbsd* | knetbsd*-gnu) if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else archive_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared -nodefaultlibs $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris* | sysv5*) if $LD -v 2>&1 | egrep 'BFD 2\.8' > /dev/null; then ld_shlibs=no cat <<EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. EOF elif $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; sunos4*) archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= hardcode_direct=yes hardcode_shlibpath_var=no ;; tpf*) ld_shlibs=yes ;; *) if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac if test "$ld_shlibs" = yes; then runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir' export_dynamic_flag_spec='${wl}--export-dynamic' case $host_os in cygwin* | mingw*) # dlltool doesn't understand --whole-archive et. al. whole_archive_flag_spec= ;; *) # ancient GNU ld didn't support --whole-archive et. al. 
if $LD --help 2>&1 | egrep 'no-whole-archive' > /dev/null; then whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' else whole_archive_flag_spec= fi ;; esac fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) allow_undefined_flag=unsupported always_export_symbols=yes archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test "$with_gcc" = yes && test -z "$link_static_flag"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix4* | aix5*) hardcode_direct=yes hardcode_libdir_separator=':' link_all_deplibs=yes file_list_spec='${wl}-f,' # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. if test "$with_gcc" = yes; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`${CC} -print-prog-name=collect2` if test -f "$collect2name" && \ strings "$collect2name" | grep resolve_lib_name >/dev/null then # We have reworked collect2 hardcode_direct=yes else # We have old collect2 hardcode_direct=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi esac shared_flag='-shared' else # not using gcc if test "$host_cpu" = ia64; then shared_flag='${wl}-G' else shared_flag='${wl}-bM:SRE' fi fi if test "$host_cpu" = ia64; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no if test $with_gnu_ld = no; then exp_sym_flag='-Bexport' no_entry_flag="" fi else # Test if we are trying to use run time linking, or normal AIX style linking. # If -brtl is somewhere in LDFLAGS, we need to do run time linking. aix_use_runtimelinking=no for ld_flag in $LDFLAGS; do if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl" ); then aix_use_runtimelinking=yes break fi done exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # -bexpall does not export symbols beginning with underscore (_) always_export_symbols=yes if test "$aix_use_runtimelinking" = yes; then # Warning - without using the other run time loading flags (-brtl), -berok will # link without error, but may produce a broken library. 
allow_undefined_flag=' ${wl}-berok' hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib' archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" else if test "$host_cpu" = ia64; then if test $with_gnu_ld = no; then hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols" fi else allow_undefined_flag=' ${wl}-berok' # -bexpall does not export symbols beginning with underscore (_) always_export_symbols=yes # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec=' ' build_libtool_need_lc=yes hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib' # This is similar to how AIX traditionally builds it's shared libraries. archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' fi fi ;; amigaos*) archive_cmds='$rm $output_objdir/a2ixlibrary.data~$echo "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$echo "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$echo "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$echo "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes # see comment about different semantics on the GNU ld section ld_shlibs=no ;; cygwin* | mingw*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `echo "$deplibs" | sed -e '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. old_archive_from_new_cmds='true' # FIXME: Should let the user specify the lib program. old_archive_cmds='lib /OUT:$oldlib$oldobjs$old_deplibs' fix_srcfile_path='`cygpath -w "$srcfile"`' ;; darwin* | rhapsody*) case "$host_os" in rhapsody* | darwin1.[[012]]) allow_undefined_flag='-undefined suppress' ;; *) # Darwin 1.3 on if test -z ${MACOSX_DEPLOYMENT_TARGET} ; then allow_undefined_flag='-flat_namespace -undefined suppress' else case ${MACOSX_DEPLOYMENT_TARGET} in 10.[[012]]) allow_undefined_flag='-flat_namespace -undefined suppress' ;; 10.*) allow_undefined_flag='-undefined dynamic_lookup' ;; esac fi ;; esac # Disable shared library build on OS-X older than 10.3. 
case $host_os in darwin[1-6]*) can_build_shared=no ;; darwin7*) can_build_shared=yes ;; esac output_verbose_link_cmd='echo' archive_cmds='$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags -install_name $rpath/$soname $verstring' module_cmds='$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags' # Don't fix this by using the ld -exported_symbols_list flag, # it doesn't exist in older darwin ld's archive_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC -dynamiclib $allow_undefined_flag -o $lib $libobjs $deplibs$compiler_flags -install_name $rpath/$soname $verstring~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' module_expsym_cmds='sed -e "s,#.*,," -e "s,^[ ]*,," -e "s,^\(..*\),_&," < $export_symbols > $output_objdir/${libname}-symbols.expsym~$CC $allow_undefined_flag -o $lib -bundle $libobjs $deplibs$compiler_flags~nmedit -s $output_objdir/${libname}-symbols.expsym ${lib}' hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported whole_archive_flag_spec='-all_load $convenience' link_all_deplibs=yes ;; freebsd1*) ld_shlibs=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. freebsd* | kfreebsd*-gnu) archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; hpux9* | hpux10* | hpux11*) case "$host_cpu" in ia64*) hardcode_direct=no hardcode_shlibpath_var=no archive_cmds='$LD -b +h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' ;; *) if test $with_gcc = yes; then case "$host_os" in hpux9*) archive_cmds='$rm $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;; *) archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_os in hpux9*) archive_cmds='$rm $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;; *) archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' ;; esac fi hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' hardcode_libdir_separator=: hardcode_minus_L=yes # Not in the search PATH, but as the default # location of the library. 
;; esac export_dynamic_flag_spec='${wl}-E' hardcode_direct=yes ;; irix5* | irix6*) if test "$with_gcc" = yes; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib' else archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib' fi hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: link_all_deplibs=yes ;; netbsd* | knetbsd*-gnu) if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; newsos6) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linkopts' hardcode_direct=yes hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: hardcode_shlibpath_var=no ;; openbsd*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported archive_cmds='$echo "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$echo "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~$echo DATA >> $output_objdir/$libname.def~$echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~$echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' ;; osf3*) if test "$with_gcc" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib' fi hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test "$with_gcc" = yes; then allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib' hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$LD -shared${allow_undefined_flag} $libobjs $deplibs $linker_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib' archive_expsym_cmds='for i in `cat $export_symbols`; do printf "-exported_symbol " >> $lib.exp; echo "\$i" >> $lib.exp; done; echo "-hidden">> $lib.exp~ $LD -shared${allow_undefined_flag} -input $lib.exp $linker_flags $libobjs $deplibs -soname 
$soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib~$rm $lib.exp' # cc supports -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi hardcode_libdir_separator=: ;; sco3.2v5*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ;; solaris*) no_undefined_flag=' -z defs' if test "$with_gcc" = yes; then archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ $CC -shared ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$rm $lib.exp' else archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' fi hardcode_libdir_flag_spec='-R$libdir' hardcode_shlibpath_var=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # Supported since Solaris 2.6 (maybe 2.5.1?) whole_archive_flag_spec='-z allextract$convenience -z defaultextract' ;; esac link_all_deplibs=yes ;; sunos4*) archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; sysv4) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; sysv4.3*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no export_dynamic_flag_spec='-Bexport' ;; sysv5*) no_undefined_flag=' -z text' # $CC -shared without GNU ld will not create a library from C++ # object files and a static libstdc++, better avoid it by now archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~ $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$rm $lib.exp' hardcode_libdir_flag_spec= hardcode_shlibpath_var=no runpath_var='LD_RUN_PATH' ;; uts4*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; dgux*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; sysv4*MP*) if test -d /usr/nec; then archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ld_shlibs=yes fi ;; sysv4.2uw2*) archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=no hardcode_shlibpath_var=no hardcode_runpath_var=yes runpath_var=LD_RUN_PATH ;; sysv5uw7* | unixware7*) no_undefined_flag='${wl}-z ${wl}text' if test "$GCC" = yes; then archive_cmds='$CC -shared ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' fi 
runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no ;; *) ld_shlibs=no ;; esac fi ## Compiler Characteristics: PIC flags, static flags, etc if test "X${ac_cv_prog_cc_pic+set}" = Xset; then : else ac_cv_prog_cc_pic= ac_cv_prog_cc_shlib= ac_cv_prog_cc_wl= ac_cv_prog_cc_static= ac_cv_prog_cc_no_builtin= ac_cv_prog_cc_can_build_shared=$can_build_shared if test "$with_gcc" = yes; then ac_cv_prog_cc_wl='-Wl,' ac_cv_prog_cc_static='-static' case $host_os in aix*) # All AIX code is PIC. if test "$host_cpu" = ia64; then # AIX 5 now supports IA64 processor lt_cv_prog_cc_static='-Bstatic' else lt_cv_prog_cc_static='-bnso -bI:/lib/syscalls.exp' fi ;; amigaos*) # FIXME: we need at least 68020 code to build shared libraries, but # adding the `-m68020' flag to GCC prevents building anything better, # like `-m68040'. ac_cv_prog_cc_pic='-m68020 -resident32 -malways-restore-a4' ;; beos* | irix5* | irix6* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; cygwin* | mingw* | os2*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). ac_cv_prog_cc_pic='-DDLL_EXPORT' ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files ac_cv_prog_cc_pic='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all ac_cv_prog_cc_pic= ;; sysv4*MP*) if test -d /usr/nec; then ac_cv_prog_cc_pic=-Kconform_pic fi ;; *) ac_cv_prog_cc_pic='-fPIC' ;; esac else # PORTME Check for PIC flags for the system compiler. case $host_os in aix*) # All AIX code is PIC. ac_cv_prog_cc_static="$ac_cv_prog_cc_static ${ac_cv_prog_cc_wl}-lC" ;; hpux9* | hpux10* | hpux11*) # Is there a better ac_cv_prog_cc_static that works with the bundled CC? ac_cv_prog_cc_wl='-Wl,' ac_cv_prog_cc_static="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive" ac_cv_prog_cc_pic='+Z' ;; irix5* | irix6*) ac_cv_prog_cc_wl='-Wl,' ac_cv_prog_cc_static='-non_shared' # PIC (with -KPIC) is the default. ;; cygwin* | mingw* | os2*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). ac_cv_prog_cc_pic='-DDLL_EXPORT' ;; newsos6) ac_cv_prog_cc_pic='-KPIC' ac_cv_prog_cc_static='-Bstatic' ;; osf3* | osf4* | osf5*) # All OSF/1 code is PIC. ac_cv_prog_cc_wl='-Wl,' ac_cv_prog_cc_static='-non_shared' ;; sco3.2v5*) ac_cv_prog_cc_pic='-Kpic' ac_cv_prog_cc_static='-dn' ac_cv_prog_cc_shlib='-belf' ;; solaris*) ac_cv_prog_cc_pic='-KPIC' ac_cv_prog_cc_static='-Bstatic' ac_cv_prog_cc_wl='-Wl,' ;; sunos4*) ac_cv_prog_cc_pic='-PIC' ac_cv_prog_cc_static='-Bstatic' ac_cv_prog_cc_wl='-Qoption ld ' ;; sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) ac_cv_prog_cc_pic='-KPIC' ac_cv_prog_cc_static='-Bstatic' ac_cv_prog_cc_wl='-Wl,' ;; uts4*) ac_cv_prog_cc_pic='-pic' ac_cv_prog_cc_static='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec ;then ac_cv_prog_cc_pic='-Kconform_pic' ac_cv_prog_cc_static='-Bstatic' fi ;; *) ac_cv_prog_cc_can_build_shared=no ;; esac fi case "$host_os" in # Platforms which do not suport PIC and -DPIC is meaningless # on them: *djgpp*) ac_cv_prog_cc_pic= ;; *) ac_cv_prog_cc_pic="$ac_cv_prog_cc_pic -DPIC" ;; esac fi need_lc=yes if test "$enable_shared" = yes && test "$with_gcc" = yes; then case $archive_cmds in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. 
If gcc already passes -lc # to ld, don't add -lc before -lgcc. echo $ac_n "checking whether -lc should be explicitly linked in... $ac_c" 1>&6 if eval "test \"`echo '$''{'ac_cv_archive_cmds_needs_lc'+set}'`\" = set"; then echo $ac_n "(cached) $ac_c" 1>&6 need_lc=$ac_cv_archive_cmds_needs_lc else $rm conftest* echo "static int dummy;" > conftest.$ac_ext if { (eval echo ltcf-c.sh:need_lc: \"$ac_compile\") 1>&5; (eval $ac_compile) 2>conftest.err; }; then # Append any warnings to the config.log. cat conftest.err 1>&5 soname=conftest lib=conftest libobjs=conftest.$objext deplibs= wl=$ac_cv_prog_cc_wl compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { (eval echo ltcf-c.sh:need_lc: \"$archive_cmds\") 1>&5; (eval $archive_cmds) 2>&1 | grep " -lc " 1>&5 ; }; then need_lc=no fi allow_undefined_flag=$save_allow_undefined_flag else cat conftest.err 1>&5 fi fi $rm conftest* echo "$ac_t$need_lc" 1>&6 ;; esac fi ac_cv_archive_cmds_needs_lc=$need_lc
amitksaha/spark-scheme
libs/mzs/src/foreign/gcc/ltcf-c.sh
Shell
gpl-3.0
33,990
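# Stand-alone sketch of the GNU-ld capability probe that ltcf-c.sh applies
# per platform above; the LD default and echoed messages are assumptions
# for illustration, not part of the original file.
LD=${LD-ld}
if $LD --help 2>&1 | egrep ': supported targets:.* elf' > /dev/null; then
  echo "GNU ELF ld: -shared/--whole-archive style commands are usable"
else
  echo "non-GNU or non-ELF ld: fall back to the vendor linker settings"
fi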