code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/bash
##########################################################################
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012
# Vincent C. Passaro ([email protected])
# Shannon Mitchell ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
##########################################################################
###################### Fotis Networks LLC ###############################
# By Vincent C. Passaro #
# Fotis Networks LLC #
# Vincent[.]Passaro[@]fotisnetworks[.]com #
# www.fotisnetworks.com #
###################### Fotis Networks LLC ###############################
#
# _____________________________________________________________________
# | Version | Change Information | Author | Date |
# |__________|_______________________|____________________|____________|
# | 1.0 | Initial Script | Vincent C. Passaro | 1-Aug-2012 |
# | | Creation | | |
# |__________|_______________________|____________________|____________|
#
#######################DISA INFORMATION##################################
# Group ID (Vulid): V-981
# Group Title: GEN003140
# Rule ID: SV-37476r1_rule
# Severity: medium
# Rule Version (STIG-ID): GEN003140
# Rule Title: Cron and crontab directories must be group-owned by root,
# sys, bin or cron.
#
# Vulnerability Discussion: To protect the integrity of scheduled system
# jobs and to prevent malicious modification to these jobs, crontab files
# must be secured. Failure to give group-ownership of cron or crontab
# directories to a system group provides the designated group and
# unauthorized users with the potential to access sensitive information or
# change the system configuration which could weaken the system's security
# posture.
#
# Responsibility: System Administrator
# IAControls: ECLP-1
#
# Check Content:
#
# Check the group owner of cron and crontab directories.
# Procedure:
# ls -ld /var/spool/cron
# ls -ld /etc/cron.d /etc/crontab /etc/cron.daily /etc/cron.hourly
# /etc/cron.monthly /etc/cron.weekly
# or
# ls -ld /etc/cron*|grep -v deny
# If a directory is not group-owned by root, sys, bin, or cron, this is a
# finding.
#
# Fix Text:
#
# Change the group owner of cron and crontab directories.
# chgrp root <crontab directory>
#######################DISA INFORMATION##################################
# Global Variables
PDI=GEN003140

# Start-Lockdown
#
# For every cron/crontab directory, ensure the group owner is one of the
# STIG-approved system groups (root, sys, bin or cron); otherwise reset it
# to root, per the documented fix text.
for CRONDIR in /etc/cron.d /etc/crontab /etc/cron.daily /etc/cron.hourly /etc/cron.monthly /etc/cron.weekly /var/spool/cron
do
  # Skip paths missing on this host (minimal installs); the original let
  # stat fail noisily and then ran chgrp on a nonexistent path.
  [ -e "$CRONDIR" ] || continue
  CURGOWN=$(stat -c %G "$CRONDIR")
  case "$CURGOWN" in
    root|sys|bin|cron)
      # Group ownership already compliant; nothing to do.
      ;;
    *)
      chgrp root "$CRONDIR"
      ;;
  esac
done
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5/prod/GEN003140.sh
|
Shell
|
apache-2.0
| 3,803 |
#!/bin/bash
# Download and install Go $GO_VERSION into /tmp/go<version>.
# Idempotent: skipped when the go binary is already present.
set -euo pipefail

GO_VERSION="1.9"
export GoInstallDir="/tmp/go$GO_VERSION"
mkdir -p "$GoInstallDir"

if [ ! -f "$GoInstallDir/go/bin/go" ]; then
  GO_MD5="4577d9ba083ac86de78012c04a2981be"
  # The buildpacks mirror names the tarball with the first 8 hex chars of the MD5.
  URL="https://buildpacks.cloudfoundry.org/dependencies/go/go${GO_VERSION}.linux-amd64-${GO_MD5:0:8}.tar.gz"
  echo "-----> Download go ${GO_VERSION}"
  # -f: fail on HTTP errors instead of saving an HTML error page as the tarball.
  curl -f -s -L --retry 15 --retry-delay 2 "$URL" -o /tmp/go.tar.gz
  DOWNLOAD_MD5=$(md5sum /tmp/go.tar.gz | cut -d ' ' -f 1)
  # Quote the RHS so the MD5 is compared literally, not as a glob pattern.
  if [[ "$DOWNLOAD_MD5" != "$GO_MD5" ]]; then
    echo " **ERROR** MD5 mismatch: got $DOWNLOAD_MD5 expected $GO_MD5"
    exit 1
  fi
  tar xzf /tmp/go.tar.gz -C "$GoInstallDir"
  rm /tmp/go.tar.gz
fi

# Final sanity check: the go binary must exist after extraction.
if [ ! -f "$GoInstallDir/go/bin/go" ]; then
  echo " **ERROR** Could not download go"
  exit 1
fi
|
oocl-ncc-itm/nodejs-buildpack
|
scripts/install_go.sh
|
Shell
|
apache-2.0
| 777 |
# Integration test for nested parallel stages: run the pipeline on test.txt
# and verify each stage ran and produced its output file.
# 'run' and 'err' come from the sourced testsupport.sh; err presumably
# records/reports the failure -- TODO confirm whether it aborts immediately.
source ../testsupport.sh
source ./cleanup.sh
# Execute the pipeline under test (stage log ends up in test.out).
run test.txt
# Every stage should have announced itself in the log.
grep -q "Stage hello" test.out || err "Failed to find expected stage hello"
grep -q "Stage world" test.out || err "Failed to find expected stage world"
grep -q "Stage end" test.out || err "Failed to find expected stage end"
# Each branch must have produced its intermediate file.
# NOTE: '[ ! -f x ] && err' leaves a non-zero status when the file exists;
# the trailing 'true' below compensates so the script still exits 0.
[ ! -f test.txt.hello ] && err "Failed to find expected output test.txt.hello"
[ ! -f test.txt.hello.world ] && err "Failed to find expected output test.txt.hello.world"
[ ! -f test.txt.hello.take_me_to_your_leader ] && err "Failed to find expected output test.txt.hello.take_me_to_your_leader"
[ ! -f test.txt.hello.how_are_you ] && err "Failed to find expected output test.txt.hello.how_are_you"
# The merged *.end output must contain each branch's marker string.
grep -q "goo" *.end || err "Failed to find goo in output as expected from test.txt.hello.take_me_to_your_leader"
grep -q "foo" *.end || err "Failed to find foo in output as expected from test.txt.hello"
grep -q "boo" *.end || err "Failed to find boo in output as expected from test.txt.hello.how_are_you"
true
|
vivovip/bpipe
|
tests/nested_parallel/run.sh
|
Shell
|
bsd-3-clause
| 996 |
# Run the NUnit test suite over every Release configuration under gmake/lib.
set -e

BUILD_DIR=$(dirname -- "$0")

# Stage the NUnit framework assemblies next to the test DLLs.
cp "$BUILD_DIR"/../deps/NUnit/nunit.framework.* "$BUILD_DIR"/gmake/lib/Release_*/

# BUG FIX: the original line continuation applied MONO_PATH to the 'cp'
# above instead of the mono invocation; set it where the console runner
# actually needs it to resolve its assemblies.
MONO_PATH="$BUILD_DIR/../deps/NUnit.ConsoleRunner.3.6.1/tools" \
  mono "$BUILD_DIR"/../deps/NUnit.ConsoleRunner.3.6.1/tools/nunit3-console.exe -noresult "$BUILD_DIR"/gmake/lib/Release_*/*Tests*.dll
|
zillemarco/CppSharp
|
build/RunTests.sh
|
Shell
|
mit
| 303 |
#!/usr/bin/env bash
# (Re)create a MySQL database for local Homestead development.
# Usage: create-mysql.sh <database-name>
DB=$1

# Fail fast instead of silently running DROP/CREATE with an empty name.
if [ -z "$DB" ]; then
  echo "usage: $0 <database-name>" >&2
  exit 1
fi

# NOTE(review): $DB is interpolated into SQL unescaped -- trusted input
# only. Backticks guard reserved words/dashes, not injection.
mysql -uhomestead -psecret -e "DROP DATABASE IF EXISTS \`$DB\`"
mysql -uhomestead -psecret -e "CREATE DATABASE \`$DB\`"
|
dnacreative/SimpleLance
|
scripts/create-mysql.sh
|
Shell
|
mit
| 142 |
#!/bin/bash
# Install a git HTTP cookie for go.googlesource.com so that CI requests to
# the Go Gerrit instance are authenticated.
touch ~/.gitcookies
# The cookie file holds a credential; restrict it to the owner.
chmod 0600 ~/.gitcookies
git config --global http.cookiefile ~/.gitcookies
# Append the cookie: the backslash-quoted here-doc delimiter (\__END__)
# makes the body literal, and tr converts commas to the tabs required by
# the Netscape cookie-file format.
tr , \\t <<\__END__ >>~/.gitcookies
go.googlesource.com,FALSE,/,TRUE,2147483647,o,git-rileykarson.google.com=1/rOwTyPQnsZnGgNtlqMhkqM63-n0W68pQ7GfhAKGIy4E
__END__
|
hashicorp/terraform-provider-google
|
scripts/gogetcookie.sh
|
Shell
|
mpl-2.0
| 273 |
# Based on: https://github.com/concourse/docker-image-resource/blob/master/assets/common.sh
# Tunables, all overridable via the environment:
DOCKER_LOG_FILE=${DOCKER_LOG_FILE:-/tmp/docker.log}  # where dockerd output is captured
SKIP_PRIVILEGED=${SKIP_PRIVILEGED:-false}            # "true" skips cgroup/procfs setup
STARTUP_TIMEOUT=${STARTUP_TIMEOUT:-120}              # seconds to wait for dockerd
# Make /sys/fs/cgroup usable inside the container: mount a tmpfs there if
# needed, then give every enabled cgroup-v1 subsystem a read-write cgroup
# mount, reusing the hierarchy grouping the current process already has.
sanitize_cgroups() {
  mkdir -p /sys/fs/cgroup
  mountpoint -q /sys/fs/cgroup || \
    mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
  mount -o remount,rw /sys/fs/cgroup
  # /proc/cgroups columns: subsys_name hierarchy num_cgroups enabled
  # ('sed -e 1d' drops the header line).
  sed -e 1d /proc/cgroups | while read sys hierarchy num enabled; do
    if [ "$enabled" != "1" ]; then
      # subsystem disabled; skip
      continue
    fi
    # Find which (possibly comma-joined) grouping this subsystem belongs
    # to in our own cgroup view, e.g. "cpu,cpuacct".
    grouping="$(cat /proc/self/cgroup | cut -d: -f2 | grep "\\<$sys\\>")" || true
    if [ -z "$grouping" ]; then
      # subsystem not mounted anywhere; mount it on its own
      grouping="$sys"
    fi
    mountpoint="/sys/fs/cgroup/$grouping"
    mkdir -p "$mountpoint"
    # clear out existing mount to make sure new one is read-write
    if mountpoint -q "$mountpoint"; then
      umount "$mountpoint"
    fi
    mount -n -t cgroup -o "$grouping" cgroup "$mountpoint"
    # For combined groupings, let the bare subsystem name resolve to the
    # combined mount via a symlink (e.g. /sys/fs/cgroup/cpu -> cpu,cpuacct).
    if [ "$grouping" != "$sys" ]; then
      if [ -L "/sys/fs/cgroup/$sys" ]; then
        rm "/sys/fs/cgroup/$sys"
      fi
      ln -s "$mountpoint" "/sys/fs/cgroup/$sys"
    fi
  done
  # The systemd named cgroup is not listed in /proc/cgroups; mount it
  # separately so systemd-aware software can find it.
  if ! test -e /sys/fs/cgroup/systemd ; then
    mkdir /sys/fs/cgroup/systemd
    mount -t cgroup -o none,name=systemd none /sys/fs/cgroup/systemd
  fi
}
# Launch a background dockerd and wait until it answers 'docker info'.
# Arguments (positions 1 and 2 are not read here -- presumably reserved by
# callers of the upstream concourse helper; TODO confirm):
#   $3 - space-separated list of insecure registries
#   $4 - optional registry mirror URL
# Globals: SKIP_PRIVILEGED, DOCKER_LOG_FILE, STARTUP_TIMEOUT (defaults above).
start_docker() {
  mkdir -p /var/log
  mkdir -p /var/run
  if [ "$SKIP_PRIVILEGED" = "false" ]; then
    sanitize_cgroups
    # check for /proc/sys being mounted readonly, as systemd does
    if grep '/proc/sys\s\+\w\+\s\+ro,' /proc/mounts >/dev/null; then
      mount -o remount,rw /proc/sys
    fi
  fi
  # Match the MTU of the interface that routes externally so container
  # traffic is not fragmented.
  local mtu=$(cat /sys/class/net/$(ip route get 8.8.8.8|awk '{ print $5 }')/mtu)
  local server_args="--mtu ${mtu}"
  local registry=""
  server_args="${server_args}"
  for registry in $3; do
    server_args="${server_args} --insecure-registry ${registry}"
  done
  if [ -n "$4" ]; then
    server_args="${server_args} --registry-mirror $4"
  fi
  # One attempt: start dockerd, record its PID, then poll 'docker info'
  # until it answers or the daemon process dies (then report failure).
  try_start() {
    dockerd --data-root /scratch/docker ${server_args} >$DOCKER_LOG_FILE 2>&1 &
    echo $! > /tmp/docker.pid
    sleep 1
    echo waiting for docker to come up...
    until docker info >/dev/null 2>&1; do
      sleep 1
      if ! kill -0 "$(cat /tmp/docker.pid)" 2>/dev/null; then
        return 1
      fi
    done
  }
  # Export state so the 'timeout bash -c' child below can see it.
  export server_args DOCKER_LOG_FILE
  declare -fx try_start
  trap stop_docker EXIT
  if ! timeout ${STARTUP_TIMEOUT} bash -ce 'while true; do try_start && break; done'; then
    echo Docker failed to start within ${STARTUP_TIMEOUT} seconds.
    return 1
  fi
}
# Stop the dockerd instance started by start_docker, if any.
# A missing or empty /tmp/docker.pid means nothing was started; that is
# not an error.
stop_docker() {
  local pid
  # Guard the read: the original cat'ed unconditionally, printing an error
  # when stop_docker fired (via the EXIT trap) before dockerd ever started.
  if [ ! -f /tmp/docker.pid ]; then
    return 0
  fi
  pid=$(cat /tmp/docker.pid)
  if [ -z "$pid" ]; then
    return 0
  fi
  kill -TERM "$pid"
}
|
hello2009chen/spring-boot
|
ci/images/docker-lib.sh
|
Shell
|
apache-2.0
| 2,776 |
#!/bin/bash
# TechEmpower benchmark launcher: run the Rack app under MRI Ruby + Puma.
# fw_depends (provided by the benchmark harness) installs the toolchain.
fw_depends rvm ruby-2.2
# Point the app's database config at the benchmark database host.
sed -i 's|127.0.0.1|'${DBHOST}'|g' config/database.yml
# NOTE(review): MRI_VERSION is never set in this script -- presumably
# exported by the harness environment; confirm before running standalone.
rvm ruby-$MRI_VERSION do bundle install --jobs=4 --gemfile=$TROOT/Gemfile --path=vendor/bundle
# Start Puma in the background: 8 workers, 8-32 threads each, port 8080.
rvm ruby-$MRI_VERSION do bundle exec puma -t 8:32 -w 8 --preload -b tcp://0.0.0.0:8080 -e production &
|
ashawnbandy-te-tfb/FrameworkBenchmarks
|
frameworks/Ruby/rack/run_mri_puma.sh
|
Shell
|
bsd-3-clause
| 293 |
#!/bin/bash
# obtain and optionally verify Bench / signature
# if no reference is given, the output is deliberately limited to just the signature
# Report the line number where the bench run failed, then abort the script.
error() {
  printf '%s\n' "running bench for signature failed on line $1"
  exit 1
}
# Abort with a line-number diagnostic whenever any command fails.
trap 'error ${LINENO}' ERR

# Run the engine's built-in benchmark and pull out the node-count signature.
signature=$(./stockfish bench 2>&1 | grep "Nodes searched : " | awk '{print $4}')

if [ $# -gt 0 ]; then
  # A reference was supplied: verify the obtained signature against it.
  if [ "$1" == "$signature" ]; then
    echo "signature OK: $signature"
  else
    echo "signature mismatch: reference $1 obtained $signature"
    exit 1
  fi
else
  # No reference: just report the signature.
  echo $signature
fi
|
crunchyNYC/Stockfish2016
|
tests/signature.sh
|
Shell
|
gpl-3.0
| 620 |
#!/usr/bin/env bash
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
set -Eeuo pipefail
source "$(dirname "$0")"/common.sh
source "$(dirname "$0")"/kafka-common.sh 2.6.0 5.0.0 5.0
# Compare a Flink program's output with the expected value; exit 1 on mismatch.
#   $1 - expected text. NOTE(review): it is used as the printf FORMAT string
#        (unquoted), so backslash escapes are interpreted and a literal '%'
#        would be misparsed -- fine for the JSON payloads used here.
#   $2 - actual output; all spaces are stripped before comparing.
function verify_output {
  local expected=$(printf $1)
  local result=$(echo $2 | sed 's/ //g')
  if [[ "$result" != "$expected" ]]; then
    echo "Output from Flink program does not match expected output."
    echo -e "EXPECTED FOR KEY: --$expected--"
    echo -e "ACTUAL: --$result--"
    exit 1
  fi
}
# Bring up the external services the test needs (helpers from kafka-common.sh).
function test_setup {
  start_kafka_cluster
  start_confluent_schema_registry
}
# Tear the services down in reverse start order.
function test_cleanup {
  stop_confluent_schema_registry
  stop_kafka_cluster
}
# Always clean up, however the test exits (on_exit comes from common.sh).
on_exit test_cleanup
# End-to-end test: register an Avro schema with the Confluent schema
# registry, produce Avro messages to Kafka, run the Flink consumer job, and
# verify both its string and Avro output topics. Relies on helpers sourced
# from common.sh / kafka-common.sh (setup_kafka_dist, start_cluster,
# send_messages_to_kafka_avro, read_messages_from_kafka*, ...).
function schema_registry_test {
  setup_kafka_dist
  setup_confluent_dist
  # Service startup is flaky in CI; retry up to 3 times with backoff,
  # cleaning up between attempts.
  retry_times_with_backoff_and_cleanup 3 5 test_setup test_cleanup
  TEST_PROGRAM_JAR=${END_TO_END_DIR}/flink-confluent-schema-registry/target/TestAvroConsumerConfluent.jar
  INPUT_MESSAGE_1='{"name":"Alyssa","favoriteNumber":"250","favoriteColor":"green","eventType":"meeting"}'
  INPUT_MESSAGE_2='{"name":"Charlie","favoriteNumber":"10","favoriteColor":"blue","eventType":"meeting"}'
  INPUT_MESSAGE_3='{"name":"Ben","favoriteNumber":"7","favoriteColor":"red","eventType":"meeting"}'
  USER_SCHEMA='{"namespace":"example.avro","type":"record","name":"User","fields":[{"name":"name","type":"string","default":""},{"name":"favoriteNumber","type":"string","default":""},{"name":"favoriteColor","type":"string","default":""},{"name":"eventType","type":{"name":"EventType","type":"enum","symbols":["meeting"]}}]}'
  # Register the (escaped) schema for subject "users-value" with the registry.
  curl -X POST \
  ${SCHEMA_REGISTRY_URL}/subjects/users-value/versions \
  -H 'cache-control: no-cache' \
  -H 'content-type: application/vnd.schemaregistry.v1+json' \
  -d '{"schema": "{\"namespace\": \"example.avro\",\"type\": \"record\",\"name\": \"User\",\"fields\": [{\"name\": \"name\", \"type\": \"string\", \"default\": \"\"},{\"name\": \"favoriteNumber\", \"type\": \"string\", \"default\": \"\"},{\"name\": \"favoriteColor\", \"type\": \"string\", \"default\": \"\"},{\"name\": \"eventType\",\"type\": {\"name\": \"EventType\",\"type\": \"enum\", \"symbols\": [\"meeting\"] }}]}"}'
  echo "Sending messages to Kafka topic [test-avro-input] ..."
  send_messages_to_kafka_avro $INPUT_MESSAGE_1 test-avro-input $USER_SCHEMA
  send_messages_to_kafka_avro $INPUT_MESSAGE_2 test-avro-input $USER_SCHEMA
  send_messages_to_kafka_avro $INPUT_MESSAGE_3 test-avro-input $USER_SCHEMA
  start_cluster
  create_kafka_topic 1 1 test-string-out
  create_kafka_topic 1 1 test-avro-out
  # Read Avro message from [test-avro-input], check the schema and send message to [test-string-ou]
  $FLINK_DIR/bin/flink run -d $TEST_PROGRAM_JAR \
  --input-topic test-avro-input --output-string-topic test-string-out --output-avro-topic test-avro-out --output-subject test-output-subject \
  --bootstrap.servers localhost:9092 --group.id myconsumer --auto.offset.reset earliest \
  --schema-registry-url ${SCHEMA_REGISTRY_URL}
  echo "Reading messages from Kafka topic [test-string-out] ..."
  KEY_1_STRING_MSGS=$(read_messages_from_kafka 3 test-string-out Alyssa_consumer | grep Alyssa)
  KEY_2_STRING_MSGS=$(read_messages_from_kafka 3 test-string-out Charlie_consumer | grep Charlie)
  KEY_3_STRING_MSGS=$(read_messages_from_kafka 3 test-string-out Ben_consumer | grep Ben)
  ## Verifying STRING output with actual message
  verify_output $INPUT_MESSAGE_1 "$KEY_1_STRING_MSGS"
  verify_output $INPUT_MESSAGE_2 "$KEY_2_STRING_MSGS"
  verify_output $INPUT_MESSAGE_3 "$KEY_3_STRING_MSGS"
  KEY_1_AVRO_MSGS=$(read_messages_from_kafka_avro 3 test-avro-out $USER_SCHEMA Alyssa_consumer_1 | grep Alyssa)
  KEY_2_AVRO_MSGS=$(read_messages_from_kafka_avro 3 test-avro-out $USER_SCHEMA Charlie_consumer_1 | grep Charlie)
  KEY_3_AVRO_MSGS=$(read_messages_from_kafka_avro 3 test-avro-out $USER_SCHEMA Ben_consumer_1 | grep Ben)
  ## Verifying AVRO output with actual message
  verify_output $INPUT_MESSAGE_1 "$KEY_1_AVRO_MSGS"
  verify_output $INPUT_MESSAGE_2 "$KEY_2_AVRO_MSGS"
  verify_output $INPUT_MESSAGE_3 "$KEY_3_AVRO_MSGS"
}
# Entry point: give the whole test 15 minutes before it is killed
# (run_test_with_timeout is a helper from common.sh).
run_test_with_timeout 900 schema_registry_test
|
greghogan/flink
|
flink-end-to-end-tests/test-scripts/test_confluent_schema_registry.sh
|
Shell
|
apache-2.0
| 5,041 |
#!/bin/bash
# conda post-link script: fetch the TxDb.Ptroglodytes.UCSC.panTro5.refGene
# data package from a list of mirrors, verify its MD5, and install it into
# the bundled R library.
FN="TxDb.Ptroglodytes.UCSC.panTro5.refGene_3.12.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.14/data/annotation/src/contrib/TxDb.Ptroglodytes.UCSC.panTro5.refGene_3.12.0.tar.gz"
  "https://bioarchive.galaxyproject.org/TxDb.Ptroglodytes.UCSC.panTro5.refGene_3.12.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-txdb.ptroglodytes.ucsc.pantro5.refgene/bioconductor-txdb.ptroglodytes.ucsc.pantro5.refgene_3.12.0_src_all.tar.gz"
)
MD5="76ee1d253d1c8e54c12e8d5b9db93303"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p "$STAGING"
TARBALL=$STAGING/$FN

SUCCESS=0
for URL in "${URLS[@]}"; do
  # -f: treat HTTP errors as failures so we fall through to the next mirror
  # instead of checksumming an HTML error page.
  curl -f "$URL" -o "$TARBALL" || continue
  # Platform-specific md5sum checks (md5sum on Linux, md5 on macOS).
  if [[ $(uname -s) == "Linux" ]]; then
    # md5sum -c expects "<hash>  <file>" (two spaces) on its check line.
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    # FIX: was 'else if', which silently nested an extra if level.
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done

if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
cokelaer/bioconda-recipes
|
recipes/bioconductor-txdb.ptroglodytes.ucsc.pantro5.refgene/post-link.sh
|
Shell
|
mit
| 1,442 |
#!/bin/bash
# Build the Brackets macOS DMG installer with dropdmg.

# config
releaseName="Brackets"
version="1.5"
dmgName="${releaseName} Release ${version}"
format="bzip2"
encryption="none"
tmpLayout="./dropDmgConfig/layouts/tempLayout"
appName="${releaseName}.app"
tempDir="tempBuild"

# rename app and copy to tempBuild directory
rm -rf "$tempDir"
mkdir "$tempDir"
cp -r "./staging/${BRACKETS_APP_NAME}.app/" "$tempDir/$appName"

# create symlink to Applications folder in staging area
# with a single space as the name so it doesn't show an unlocalized name
ln -s /Applications "$tempDir/ "

# copy volume icon to staging area if one exists
customIcon=""
if [ -f ./assets/VolumeIcon.icns ]; then
  cp ./assets/VolumeIcon.icns "$tempDir/.VolumeIcon.icns"
  customIcon="--custom-icon"
fi

# if license folder exists, use it (initialize so a stale environment
# variable can never leak extra arguments into the dropdmg call)
customLicense=""
if [ -d ./dropDmgConfig/licenses/bracketsLicense ]; then
  customLicense="--license-folder ./dropDmgConfig/licenses/bracketsLicense"
fi

# create disk layout
# BUG FIX: the original removed "$tempLayoutDir", a variable that is never
# defined anywhere, so a stale layout at $tmpLayout was never cleaned up
# before the copy below.
rm -rf "$tmpLayout"
cp -r ./dropDmgConfig/layouts/bracketsLayout/ "$tmpLayout"

# build the DMG
echo "building DMG..."
# $customIcon / $customLicense are intentionally unquoted: when set they
# expand to extra (multi-word) options, when empty they disappear.
dropdmg ./$tempDir --format $format --encryption $encryption $customIcon --layout-folder "$tmpLayout" $customLicense --volume-name "$dmgName" --base-name "$dmgName"

# clean up
rm -rf "$tempDir"
rm -rf "$tmpLayout"
|
jomolinare/brackets-shell
|
installer/mac/buildInstaller.sh
|
Shell
|
mit
| 1,256 |
#!/usr/bin/env bash
# Usage:
# ./setup-data-dir.sh [path to rkt data dir]
# The script can take one parameter being a path to the rkt data
# directory. If none is passed, /var/lib/rkt is assumed.
# Please keep it in sync with dist/init/systemd/tmpfiles.d/rkt.conf!
set -e
# Everything below chmods/chgrps system paths, so refuse to run unprivileged.
if [[ $EUID -ne 0 ]]; then
  echo "This script must be run as root"
  exit 1
fi
# Data directory: first CLI argument, defaulting to /var/lib/rkt.
datadir="${1}"
if [[ -z "${datadir}" ]]; then
  datadir="/var/lib/rkt"
fi
# Ensure a directory exists with the given mode and group ownership.
#   $1 - directory to create if it does not exist
#   $2 - mode to set the directory to
#   $3 - group to set the directory ownership to
make_directory() {
  local dir="${1}" mode="${2}" group="${3}"
  if [[ -e "${dir}" ]]; then
    # Already present: just normalize the permissions.
    chmod "${mode}" "${dir}"
  else
    mkdir --mode="${mode}" "${dir}"
  fi
  chgrp "${group}" "${dir}"
}
# Ensure a file exists with the given mode, group-owned by "rkt".
#   $1 - file to create if it does not exist
#   $2 - mode to set the file to
create_rkt_file() {
  local file="${1}" mode="${2}"
  [[ -e "${file}" ]] || touch "${file}"
  chmod "${mode}" "${file}"
  chgrp rkt "${file}"
}
# Ensure the rkt groups exist (idempotent thanks to getent short-circuit).
getent group rkt-admin || groupadd --force --system rkt-admin
getent group rkt || groupadd --force --system rkt
# If systemd-tmpfiles is available, let the packaged rkt.conf do the whole
# layout instead of the manual calls below.
if which systemd-tmpfiles; then
  systemd-tmpfiles --create /usr/lib/tmpfiles.d/rkt.conf
  exit
fi
# Manual layout: modes/groups must stay in sync with
# dist/init/systemd/tmpfiles.d/rkt.conf (see header comment).
make_directory "${datadir}" 2750 "rkt"
make_directory "${datadir}/tmp" 2750 "rkt"
make_directory "${datadir}/cas" 2770 "rkt"
make_directory "${datadir}/cas/db" 2770 "rkt"
create_rkt_file "${datadir}/cas/db/ql.db" 0660
# the ql database uses a WAL file whose name is generated from the sha1 hash of
# the database name
create_rkt_file "${datadir}/cas/db/.34a8b4c1ad933745146fdbfef3073706ee571625" 0660
make_directory "${datadir}/cas/imagelocks" 2770 "rkt"
make_directory "${datadir}/cas/imageManifest" 2770 "rkt"
make_directory "${datadir}/cas/blob" 2770 "rkt"
make_directory "${datadir}/cas/tmp" 2770 "rkt"
make_directory "${datadir}/cas/tree" 2700 "rkt"
make_directory "${datadir}/cas/treestorelocks" 2700 "rkt"
make_directory "${datadir}/locks" 2750 "rkt"
make_directory "${datadir}/pods" 2750 "rkt"
make_directory "${datadir}/pods/embryo" 2750 "rkt"
make_directory "${datadir}/pods/prepare" 2750 "rkt"
make_directory "${datadir}/pods/prepared" 2750 "rkt"
make_directory "${datadir}/pods/run" 2750 "rkt"
make_directory "${datadir}/pods/exited-garbage" 2750 "rkt"
make_directory "${datadir}/pods/garbage" 2750 "rkt"
# Admin-writable configuration directory.
make_directory "/etc/rkt" 2775 "rkt-admin"
|
yifan-gu/rkt
|
dist/scripts/setup-data-dir.sh
|
Shell
|
apache-2.0
| 2,556 |
#!/bin/bash
###########################################################################
#
# Copyright 2017 Samsung Electronics All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###########################################################################
# This script reads partition size and name list from .config and
# creates a partition map cfg file for openocd.
# Created partition map cfg file can be included in the main openocd cfg script
# for flashing.
# Absolute directory of this script (cd into ${0%/*} if it is a directory).
THIS_PATH=`test -d ${0%/*} && cd ${0%/*}; pwd`
# When location of this script is changed, only OS_DIR_PATH should be changed together!!!
OS_DIR_PATH=${THIS_PATH}/../../../../os
source ${OS_DIR_PATH}/.config
# Path ENV
PARTMAP_DIR_PATH=${THIS_PATH}
PARTITION_KCONFIG=${OS_DIR_PATH}/board/common/Kconfig
# FLASH BASE ADDRESS (Can it be made to read dynamically from .config?)
FLASH_BASE=0x04000000
# Partition information: prefer the values from .config, falling back to
# the Kconfig defaults (scraped with grep/sed from the Kconfig file).
partsize_list_default=`grep -A 2 'config FLASH_PART_SIZE' ${PARTITION_KCONFIG} | sed -n 's/\tdefault "\(.*\)".*/\1/p'`
partsize_list=${CONFIG_FLASH_PART_SIZE:=${partsize_list_default}}
partname_list_default=`grep -A 2 'config FLASH_PART_NAME' ${PARTITION_KCONFIG} | sed -n 's/\tdefault "\(.*\)".*/\1/p'`
partname_list=${CONFIG_FLASH_PART_NAME:=${partname_list_default}}
# OpenOCD cfg file to be created for flashing
PARTITION_MAP_CFG=${PARTMAP_DIR_PATH}/partition_map.cfg
# Partition map text for auto generation
PARTITION_MAP_HEADER="#
# Partition Map (Auto generated)
#
# Name Description Start address Size RO
set partition_list {"
PARTITION_MAP_FOOTER="}"
#Comma Separator -- the part lists from Kconfig are comma separated, so all
# unquoted word splitting below happens on "," only.
IFS=","
#Variables
total=1
count=1
id=1
romfs_part_exist=0
#Array to hold partition size of all partitions
part_size[$count]=0
#Array to hold partition start address of all partitions
part_start[$count]=0
echo -n "Generating partition map ... "
# Add partition map header at the beginning of cfg file
echo ${PARTITION_MAP_HEADER} > ${PARTITION_MAP_CFG}
#Loop partition size list
# Accumulate a running sum of sizes (KB) so that part_start[i] holds each
# partition's start offset and part_size[i] its size, both in bytes.
# NOTE(review): the inner while effectively runs once per outer iteration
# (count chases total) -- looks like a roundabout cumulative sum; confirm
# before simplifying.
sum=0
for psize in $partsize_list
do
  while [ "$count" -le "$total" ];
  do
    sum=`expr $sum + $psize`
    let "count += 1"
  done
  part_size[$count-1]=`expr $psize \\* 1024`
  part_start[$count]=`expr $sum \\* 1024`
  let "total += 1"
done
# Emit one table row per partition: map the raw name from .config to its
# human-readable label and read-only flag, then append the formatted row.
for pname in $partname_list
do
  case "$pname" in
    "bl1")      pname_text="BL1";           ro=0 ;;
    "sssro")    pname_text="SSS R/O Key";   ro=1 ;;
    "bl2")      pname_text="BL2";           ro=0 ;;
    "sssfw")    pname_text="SSS F/W";       ro=0 ;;
    "wlanfw")   pname_text="WLAN F/W";      ro=0 ;;
    "os")       pname_text="OS";            ro=0 ;;
    "apps")     pname_text="APPS";          ro=0 ;;
    "factory")  pname_text="Factory Reset"; ro=0 ;;
    "ota")      pname_text="OTA download";  ro=0 ;;
    "userfs")   pname_text="USER R/W";      ro=0 ;;
    "rom")      pname_text="ROM FS";        ro=0; romfs_part_exist=1 ;;
    "micom")    pname_text="MICOM";         ro=0 ;;
    "wifi")     pname_text="WIFI";          ro=0 ;;
    "nvram")    pname_text="WiFi NVRAM";    ro=1 ;;
    "sssrw")    pname_text="SSS R/W Key";   ro=1 ;;
    "zoneinfo") pname_text="ZONEINFO";      ro=1 ;;
  esac
  # Format start address / size as 0x%08X and append the row (unchanged
  # from the original; the unquoted expansions rely on IFS="," above).
  pstart="$(printf 0x"%08X" $((${part_start[$id]} + ${FLASH_BASE})))"
  psize="$(printf 0x"%08X" ${part_size[$id]})"
  format="$(printf \""%-15s" ${pname_text}\")"
  echo " ${pname} { ${format} ${pstart} ${psize}" \
  " ${ro} }" >> ${PARTITION_MAP_CFG}
  let "id += 1"
done
# add termination text
echo ${PARTITION_MAP_FOOTER} >> ${PARTITION_MAP_CFG}
echo "Done"
|
sunghan-chang/TizenRT
|
build/configs/artik05x/scripts/partition_gen.sh
|
Shell
|
apache-2.0
| 4,180 |
#! /bin/bash
# A tls test.
# Copyright (C) 2003-2014 Free Software Foundation, Inc.
# This file is part of the GNU C Library.
# The GNU C Library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# The GNU C Library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with the GNU C Library; if not, see
# <http://www.gnu.org/licenses/>.
set -e
# Positional arguments supplied by the glibc test harness:
#   $1 - object-file prefix of the build tree
#   $2 - command prefix that runs a binary via the freshly built ld.so
#   $3 - env-var wrapper for the test
common_objpfx=$1; shift
test_via_rtld_prefix=$1; shift
test_wrapper_env=$1; shift
logfile=$common_objpfx/nptl/tst-tls6.out
# We have to find libc and nptl
# NOTE(review): library_path is assigned but not referenced below --
# presumably consumed via the harness environment; confirm.
library_path=${common_objpfx}:${common_objpfx}nptl
tst_tls5="${test_via_rtld_prefix} ${common_objpfx}/nptl/tst-tls5"
# Pin the locale for reproducible tool output.
LC_ALL=C
export LC_ALL
LANG=C
export LANG
# Truncate the log and run tst-tls5 under many LD_PRELOAD module orderings,
# mixing differently aligned TLS modules (a, e, f) with b, c, d.
> $logfile
fail=0
for aligned in a e f; do
echo "preload tst-tls5mod{$aligned,b,c,d}.so" >> $logfile
echo "===============" >> $logfile
# The sed pass turns the brace-expanded, space-separated module list into
# a colon-separated LD_PRELOAD value.
${test_wrapper_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{$aligned,b,c,d}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
echo "preload tst-tls5mod{b,$aligned,c,d}.so" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{b,$aligned,c,d}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
echo "preload tst-tls5mod{b,c,d,$aligned}.so" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{b,c,d,$aligned}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
done
# Two fixed five-module combinations on top of the loop above.
echo "preload tst-tls5mod{d,a,b,c,e}" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{d,a,b,c,e}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
echo "preload tst-tls5mod{d,a,b,e,f}" >> $logfile
echo "===============" >> $logfile
${test_wrapper_env} \
LD_PRELOAD="`echo ${common_objpfx}nptl/tst-tls5mod{d,a,b,e,f}.so \
| sed 's/:$//;s/: /:/g'`" ${tst_tls5} >> $logfile || fail=1
echo >> $logfile
# Non-zero if any run above failed.
exit $fail
|
sebastianscatularo/glibc
|
nptl/tst-tls6.sh
|
Shell
|
gpl-2.0
| 2,544 |
#!/usr/bin/env bash
# Print the 10 most frequent words in Project Gutenberg ebook #76
# ("Adventures of Huckleberry Finn").
curl -s http://www.gutenberg.org/cache/epub/76/pg76.txt |
# lowercase, split into words, then count occurrences and rank descending
tr '[:upper:]' '[:lower:]' | grep -oE '\w+' | sort |
uniq -c | sort -nr | head -n 10
|
xianjunzhengbackup/code
|
shell/book/ch04/top-words-3.sh
|
Shell
|
mit
| 163 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The golang package that we are building.
readonly KUBE_GO_PACKAGE=k8s.io/kubernetes
# NOTE(review): KUBE_OUTPUT must already be exported by the calling build
# scripts before this file is sourced.
readonly KUBE_GOPATH="${KUBE_OUTPUT}/go"
# The set of server targets that we are only building for Linux
# Note: if you are adding something here, you might need to add it to
# kube::build::source_targets in build/common.sh as well.
# If you update this list, please also update build/release-tars/BUILD.
# Echo the space-separated list of Linux-only server build targets.
kube::golang::server_targets() {
  local -a server_targets=(
    cmd/kube-proxy
    cmd/kube-apiserver
    cmd/kube-controller-manager
    cmd/cloud-controller-manager
    cmd/kubelet
    cmd/kubeadm
    cmd/hyperkube
    cmd/kube-discovery
    cmd/kube-aggregator
    plugin/cmd/kube-scheduler
  )
  echo "${server_targets[@]}"
}
readonly KUBE_SERVER_TARGETS=($(kube::golang::server_targets))
# Bare binary names: strip the longest */ prefix from each target path.
readonly KUBE_SERVER_BINARIES=("${KUBE_SERVER_TARGETS[@]##*/}")
# The set of server targets that we are only building for Kubernetes nodes
# If you update this list, please also update build/release-tars/BUILD.
# Echo the space-separated list of targets built for Kubernetes nodes.
kube::golang::node_targets() {
  local -a node_targets=(
    cmd/kube-proxy
    cmd/kubelet
  )
  echo "${node_targets[@]}"
}
readonly KUBE_NODE_TARGETS=($(kube::golang::node_targets))
readonly KUBE_NODE_BINARIES=("${KUBE_NODE_TARGETS[@]##*/}")
# Windows variants: append .exe to every node binary name.
readonly KUBE_NODE_BINARIES_WIN=("${KUBE_NODE_BINARIES[@]/%/.exe}")
# KUBE_FASTBUILD limits the platform matrix to the local/amd64 platforms
# for quicker developer builds.
if [[ "${KUBE_FASTBUILD:-}" == "true" ]]; then
  readonly KUBE_SERVER_PLATFORMS=(linux/amd64)
  readonly KUBE_NODE_PLATFORMS=(linux/amd64)
  if [[ "${KUBE_BUILDER_OS:-}" == "darwin"* ]]; then
    readonly KUBE_TEST_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
    readonly KUBE_CLIENT_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
  else
    readonly KUBE_TEST_PLATFORMS=(linux/amd64)
    readonly KUBE_CLIENT_PLATFORMS=(linux/amd64)
  fi
else
  # The server platform we are building on.
  readonly KUBE_SERVER_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
  )
  # The node platforms we build for
  readonly KUBE_NODE_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    windows/amd64
  )
  # If we update this we should also update the set of platforms whose standard library is precompiled for in build/build-image/cross/Dockerfile
  readonly KUBE_CLIENT_PLATFORMS=(
    linux/amd64
    linux/386
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    darwin/amd64
    darwin/386
    windows/amd64
    windows/386
  )
  # Which platforms we should compile test targets for. Not all client platforms need these tests
  readonly KUBE_TEST_PLATFORMS=(
    linux/amd64
    darwin/amd64
    windows/amd64
  )
fi
# The set of client targets that we are building for all platforms
# If you update this list, please also update build/release-tars/BUILD.
readonly KUBE_CLIENT_TARGETS=(
  cmd/kubectl
  federation/cmd/kubefed
)
readonly KUBE_CLIENT_BINARIES=("${KUBE_CLIENT_TARGETS[@]##*/}")
readonly KUBE_CLIENT_BINARIES_WIN=("${KUBE_CLIENT_BINARIES[@]/%/.exe}")
# The set of test targets that we are building for all platforms
# If you update this list, please also update build/release-tars/BUILD.
# Echo the space-separated list of test binaries built for all platforms.
kube::golang::test_targets() {
  local -a test_targets=(
    cmd/gendocs
    cmd/genkubedocs
    cmd/genman
    cmd/genyaml
    cmd/mungedocs
    cmd/genswaggertypedocs
    cmd/linkcheck
    examples/k8petstore/web-server/src
    federation/cmd/genfeddocs
    vendor/github.com/onsi/ginkgo/ginkgo
    test/e2e/e2e.test
  )
  echo "${test_targets[@]}"
}
readonly KUBE_TEST_TARGETS=($(kube::golang::test_targets))
readonly KUBE_TEST_BINARIES=("${KUBE_TEST_TARGETS[@]##*/}")
readonly KUBE_TEST_BINARIES_WIN=("${KUBE_TEST_BINARIES[@]/%/.exe}")
# Non-compiled test material shipped verbatim in the test tarball.
# If you update this list, please also update build/release-tars/BUILD.
readonly KUBE_TEST_PORTABLE=(
  test/e2e/testing-manifests
  test/kubemark
  federation/develop
  hack/e2e.go
  hack/e2e-internal
  hack/get-build.sh
  hack/ginkgo-e2e.sh
  hack/federated-ginkgo-e2e.sh
  hack/lib
)
# Test targets which run on the Kubernetes clusters directly, so we only
# need to target server platforms.
# These binaries will be distributed in the kubernetes-test tarball.
# If you update this list, please also update build/release-tars/BUILD.
readonly KUBE_TEST_SERVER_TARGETS=(
  cmd/kubemark
  vendor/github.com/onsi/ginkgo/ginkgo
  test/e2e_node/e2e_node.test
)
readonly KUBE_TEST_SERVER_BINARIES=("${KUBE_TEST_SERVER_TARGETS[@]##*/}")
readonly KUBE_TEST_SERVER_PLATFORMS=("${KUBE_SERVER_PLATFORMS[@]}")
# Gigabytes desired for parallel platform builds. 11 is fairly
# arbitrary, but is a reasonable splitting point for 2015
# laptops-versus-not.
readonly KUBE_PARALLEL_BUILD_MEMORY=11
readonly KUBE_ALL_TARGETS=(
  "${KUBE_SERVER_TARGETS[@]}"
  "${KUBE_CLIENT_TARGETS[@]}"
  "${KUBE_TEST_TARGETS[@]}"
  "${KUBE_TEST_SERVER_TARGETS[@]}"
)
readonly KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}")
# Binaries (by bare name) that are built with static linking.
readonly KUBE_STATIC_LIBRARIES=(
  cloud-controller-manager
  kube-apiserver
  kube-controller-manager
  kube-scheduler
  kube-proxy
  kube-discovery
  kube-aggregator
  kubeadm
  kubectl
)
# Add any files with those //generate annotations in the array below.
readonly KUBE_BINDATAS=(
  test/e2e/generated/gobindata_util.go
)
# Succeed (return 0) when target $1 should be statically linked, i.e. its
# binary name appears in KUBE_STATIC_LIBRARIES or KUBE_STATIC_OVERRIDES.
kube::golang::is_statically_linked_library() {
  local lib
  for lib in "${KUBE_STATIC_LIBRARIES[@]}"; do
    [[ "$1" == *"/$lib" ]] && return 0
  done
  # Allow individual overrides--e.g., so that you can get a static build of
  # kubectl for inclusion in a container.
  if [ -n "${KUBE_STATIC_OVERRIDES:+x}" ]; then
    for lib in "${KUBE_STATIC_OVERRIDES[@]}"; do
      [[ "$1" == *"/$lib" ]] && return 0
    done
  fi
  return 1
}
# Map each build target to its full Go import path: targets that already
# start with a domain-like prefix pass through unchanged, everything else
# gets the Kubernetes package prepended.
kube::golang::binaries_from_targets() {
  local tgt
  for tgt in "$@"; do
    if [[ "${tgt}" =~ ^([[:alnum:]]+".")+[[:alnum:]]+"/" ]]; then
      echo "${tgt}"
    else
      echo "${KUBE_GO_PACKAGE}/${tgt}"
    fi
  done
}
# Asks golang what it thinks the host platform is. The go tool chain does
# some slightly different things when the target matches the host platform.
kube::golang::host_platform() {
  printf '%s/%s\n' "$(go env GOHOSTOS)" "$(go env GOHOSTARCH)"
}
# Report the platform currently being targeted, preferring explicit
# GOOS/GOARCH and falling back to the go toolchain's host values.
kube::golang::current_platform() {
  local os="${GOOS-}" arch="${GOARCH-}"
  [[ -n "$os" ]] || os=$(go env GOHOSTOS)
  [[ -n "$arch" ]] || arch=$(go env GOHOSTARCH)
  echo "$os/$arch"
}
# Takes the platform name ($1, e.g. "linux/arm") and sets the appropriate
# golang env variables (GOOS/GOARCH, plus CGO_ENABLED/CC/GOROOT when
# cross-compiling from linux/amd64) for that platform.
kube::golang::set_platform_envs() {
  [[ -n ${1-} ]] || {
    kube::log::error_exit "!!! Internal error. No platform set in kube::golang::set_platform_envs"
  }
  # BUG FIX: derive GOOS/GOARCH from the validated argument instead of a
  # 'platform' global the caller happened to have in scope.
  local platform="${1}"
  export GOOS=${platform%/*}
  export GOARCH=${platform##*/}
  # Do not set CC when building natively on a platform, only if cross-compiling from linux/amd64
  if [[ $(kube::golang::host_platform) == "linux/amd64" ]]; then
    # Dynamic CGO linking for other server architectures than linux/amd64 goes here
    # If you want to include support for more server platforms than these, add arch-specific gcc names here
    case "${platform}" in
      "linux/arm")
        export CGO_ENABLED=1
        export CC=arm-linux-gnueabihf-gcc
        # Use a special edge version of golang since the stable golang version used for everything else doesn't work
        export GOROOT=${K8S_EDGE_GOROOT}
        ;;
      "linux/arm64")
        export CGO_ENABLED=1
        export CC=aarch64-linux-gnu-gcc
        ;;
      "linux/ppc64le")
        export CGO_ENABLED=1
        export CC=powerpc64le-linux-gnu-gcc
        # Use a special edge version of golang since the stable golang version used for everything else doesn't work
        export GOROOT=${K8S_EDGE_GOROOT}
        ;;
      "linux/s390x")
        export CGO_ENABLED=1
        export CC=s390x-linux-gnu-gcc
        ;;
    esac
  fi
}
kube::golang::unset_platform_envs() {
  # Clear every cross-compilation variable that set_platform_envs may export.
  local var
  for var in GOOS GOARCH GOROOT CGO_ENABLED CC; do
    unset "${var}"
  done
}
# Create the GOPATH tree under $KUBE_OUTPUT
kube::golang::create_gopath_tree() {
  # Lay out ${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE} as a symlink back to the
  # real source tree at ${KUBE_ROOT}.
  local pkg_dir="${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}"
  local pkg_parent
  pkg_parent=$(dirname "${pkg_dir}")
  mkdir -p "${pkg_parent}"
  # TODO: This symlink should be relative.
  if [[ ! -e "${pkg_dir}" ]] || [[ "$(readlink "${pkg_dir}")" != "${KUBE_ROOT}" ]]; then
    ln -snf "${KUBE_ROOT}" "${pkg_dir}"
  fi
  # Drop a dummy BUILD file so Bazel does not recurse into the directory
  # loop that the symlink above creates.
  cat >"${KUBE_GOPATH}/BUILD" <<EOF
# This dummy BUILD file prevents Bazel from trying to descend through the
# infinite loop created by the symlink at
# ${pkg_dir}
EOF
}
# Ensure the godep tool exists and is a viable version.
# Returns 2 (after printing usage) when godep is missing or too old.
kube::golang::verify_godep_version() {
  local -a godep_version_string
  local godep_version
  local godep_min_version="63"
  # 'command -v' is the standard existence check; unlike 'which' it is a
  # builtin and also honors shell functions and aliases.
  if ! command -v godep &>/dev/null; then
    kube::log::usage_from_stdin <<EOF
Can't find 'godep' in PATH, please fix and retry.
See https://github.com/kubernetes/kubernetes/blob/master/docs/devel/development.md#godep-and-dependency-management for installation instructions.
EOF
    return 2
  fi
  # 'godep version' prints e.g. "godep v79"; strip the leading "v".
  godep_version_string=($(godep version))
  godep_version=${godep_version_string[1]/v/}
  if (( godep_version < godep_min_version )); then
    kube::log::usage_from_stdin <<EOF
Detected godep version: ${godep_version_string[*]}.
Kubernetes requires godep v${godep_min_version} or greater.
Please update:
go get -u github.com/tools/godep
EOF
    return 2
  fi
}
# Ensure the go tool exists and is a viable version.
# Returns 2 (after printing usage) when go is missing or older than 1.6.
kube::golang::verify_go_version() {
  if ! command -v go &>/dev/null; then
    kube::log::usage_from_stdin <<EOF
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
    return 2
  fi
  local go_version
  go_version=($(go version))
  # Development builds are always accepted.
  if [[ "${go_version[2]}" == "devel"* ]]; then
    return 0
  fi
  # Compare numerically. The old lexicographic test
  # ([[ "go1.10" < "go1.6" ]] is true) wrongly rejected go1.10 and later.
  local major minor
  IFS='.' read -r major minor _ <<< "${go_version[2]#go}"
  minor="${minor%%[!0-9]*}"   # drop rc/beta suffixes, e.g. "6rc1" -> "6"
  minor="${minor:-0}"
  if (( major < 1 || (major == 1 && minor < 6) )); then
    kube::log::usage_from_stdin <<EOF
Detected go version: ${go_version[*]}.
Kubernetes requires go version 1.6 or greater.
Please install Go version 1.6 or later.
EOF
    return 2
  fi
}
# kube::golang::setup_env will check that the `go` commands is available in
# ${PATH}. It will also check that the Go version is good enough for the
# Kubernetes build.
#
# Inputs:
#   KUBE_EXTRA_GOPATH - If set, this is included in created GOPATH
#
# Outputs:
#   env-var GOPATH points to our local output dir
#   env-var GOBIN is unset (we want binaries in a predictable place)
#   env-var GO15VENDOREXPERIMENT=1
#   current directory is within GOPATH
kube::golang::setup_env() {
  kube::golang::verify_go_version
  kube::golang::create_gopath_tree
  export GOPATH=${KUBE_GOPATH}
  # Append KUBE_EXTRA_GOPATH to the GOPATH if it is defined.
  if [[ -n ${KUBE_EXTRA_GOPATH:-} ]]; then
    GOPATH="${GOPATH}:${KUBE_EXTRA_GOPATH}"
  fi
  # Change directories so that we are within the GOPATH. Some tools get really
  # upset if this is not true. We use a whole fake GOPATH here to collect the
  # resultant binaries. Go will not let us use GOBIN with `go install` and
  # cross-compiling, and `go install -o <file>` only works for a single pkg.
  # 'subdir' is the current directory relative to KUBE_ROOT, so we re-enter
  # the same subdirectory inside the fake GOPATH tree.
  local subdir
  subdir=$(kube::realpath . | sed "s|$KUBE_ROOT||")
  cd "${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}/${subdir}"
  # Set GOROOT so binaries that parse code can work properly.
  export GOROOT=$(go env GOROOT)
  # Unset GOBIN in case it already exists in the current session.
  unset GOBIN
  # This seems to matter to some tools (godep, ugorji, ginkgo...)
  export GO15VENDOREXPERIMENT=1
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
# place in ${KUBE_OUTPUT_BINDIR}
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# KUBE_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs. This
# complicates pretty much everything else we do around packaging and such.
kube::golang::place_bins() {
  local host_platform
  host_platform=$(kube::golang::host_platform)
  V=2 kube::log::status "Placing binaries"
  local platform
  for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
    # 'go install' puts host binaries straight into $GOPATH/bin and
    # cross-compiled ones into an os_arch subdirectory (darwin/amd64 ->
    # darwin_amd64).
    local src_subdir="/${platform//\//_}"
    if [[ "${platform}" == "${host_platform}" ]]; then
      src_subdir=""
      # Refresh the convenience symlink for the host platform.
      rm -f "${THIS_PLATFORM_BIN}"
      ln -s "${KUBE_OUTPUT_BINPATH}/${platform}" "${THIS_PLATFORM_BIN}"
    fi
    local src_dir="${KUBE_GOPATH}/bin${src_subdir}"
    if [[ -d "${src_dir}" ]]; then
      mkdir -p "${KUBE_OUTPUT_BINPATH}/${platform}"
      find "${src_dir}" -maxdepth 1 -type f -exec \
        rsync -pc {} "${KUBE_OUTPUT_BINPATH}/${platform}" \;
    fi
  done
}
kube::golang::fallback_if_stdlib_not_installable() {
  # Check whether a CGO-enabled standard library is installed (or can be
  # installed on demand). If neither holds, warn and flip the caller-scoped
  # 'use_go_build' variable to true so the slower 'go build' path is used.
  local go_root_dir go_host_os go_host_arch cgo_pkg_dir
  go_root_dir=$(go env GOROOT)
  go_host_os=$(go env GOHOSTOS)
  go_host_arch=$(go env GOHOSTARCH)
  cgo_pkg_dir="${go_root_dir}/pkg/${go_host_os}_${go_host_arch}_cgo"
  # Already present: nothing to do.
  if [ -e "${cgo_pkg_dir}" ]; then
    return 0
  fi
  # Writable GOROOT/pkg: 'go install -installsuffix cgo' can create it.
  if [ -w "${go_root_dir}/pkg" ]; then
    return 0
  fi
  kube::log::status "+++ Warning: stdlib pkg with cgo flag not found."
  kube::log::status "+++ Warning: stdlib pkg cannot be rebuilt since ${go_root_dir}/pkg is not writable by $(whoami)"
  kube::log::status "+++ Warning: Make ${go_root_dir}/pkg writable for $(whoami) for a one-time stdlib install, Or"
  kube::log::status "+++ Warning: Rebuild stdlib using the command 'CGO_ENABLED=0 go install -a -installsuffix cgo std'"
  kube::log::status "+++ Falling back to go build, which is slower"
  # Intentionally NOT local: read by the caller (dynamic scoping).
  use_go_build=true
}
# Builds the toolchain necessary for building kube. This needs to be
# built only on the host platform.
# TODO: Find this a proper home.
# Ideally, not a shell script because testing shell scripts is painful.
kube::golang::build_kube_toolchain() {
  # NOTE: goflags/gogcflags/goldflags are inherited from the calling scope
  # (kube::golang::build_binaries) via bash dynamic scoping.
  local targets=(
    hack/cmd/teststale
    vendor/github.com/jteeuwen/go-bindata/go-bindata
  )
  local binaries
  binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))
  kube::log::status "Building the toolchain targets:" "${binaries[@]}"
  # ${arr[@]:+...} expands to nothing when the array is empty, which keeps
  # 'go install' from receiving an empty-string argument.
  go install "${goflags[@]:+${goflags[@]}}" \
    -gcflags "${gogcflags}" \
    -ldflags "${goldflags}" \
    "${binaries[@]:+${binaries[@]}}"
}
# Try and replicate the native binary placement of go install without
# calling go install.
kube::golang::output_filename_for_binary() {
  # $1: go package path of the binary; $2: target platform ("os/arch").
  # NOTE: reads 'host_platform' from the caller's scope and GOOS from the
  # environment (both are set by the build driver).
  local binary="$1"
  local platform="$2"
  local output_path="${KUBE_GOPATH}/bin"
  # Cross-compiled binaries land in an os_arch subdirectory of bin/.
  if [[ "${platform}" != "${host_platform}" ]]; then
    output_path+="/${platform//\//_}"
  fi
  local bin
  bin=$(basename "${binary}")
  # Windows executables carry an .exe suffix.
  if [[ "${GOOS}" == "windows" ]]; then
    bin+=".exe"
  fi
  echo "${output_path}/${bin}"
}
kube::golang::build_binaries_for_platform() {
  # Build every entry of the caller-scoped 'binaries' array for one platform.
  #   $1 - target platform ("os/arch")
  #   $2 - if non-empty, build each binary with 'go build' instead of
  #        'go install'
  # Also reads from the caller's scope/environment: binaries, goflags,
  # gogcflags, goldflags, GOROOT, K8S_EDGE_GOROOT, KUBE_GOPATH.
  local platform=$1
  local use_go_build=${2-}
  local -a statics=()
  local -a nonstatics=()
  local -a tests=()
  # Temporary workaround while we have two GOROOT's (which we'll get rid of as soon as we upgrade to go1.8 for amd64 as well)
  local GO=go
  if [[ "${GOROOT}" == "${K8S_EDGE_GOROOT:-}" ]]; then
    GO="${K8S_EDGE_GOROOT}/bin/go"
  fi
  V=2 kube::log::info "Env for ${platform}: GOOS=${GOOS-} GOARCH=${GOARCH-} GOROOT=${GOROOT-} CGO_ENABLED=${CGO_ENABLED-} CC=${CC-} GO=${GO}"
  # Partition the binaries: *.test packages, statically linked binaries,
  # and everything else.
  for binary in "${binaries[@]}"; do
    if [[ "${binary}" =~ ".test"$ ]]; then
      tests+=($binary)
    elif kube::golang::is_statically_linked_library "${binary}"; then
      statics+=($binary)
    else
      nonstatics+=($binary)
    fi
  done
  # Static builds need a CGO-free stdlib; may flip use_go_build (dynamic scope).
  if [[ "${#statics[@]}" != 0 ]]; then
    kube::golang::fallback_if_stdlib_not_installable;
  fi
  if [[ -n ${use_go_build:-} ]]; then
    # Slow path: one 'go build -o <outfile>' invocation per binary.
    kube::log::progress "    "
    for binary in "${statics[@]:+${statics[@]}}"; do
      local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
      CGO_ENABLED=0 "${GO}" build -o "${outfile}" \
        "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${binary}"
      kube::log::progress "*"
    done
    for binary in "${nonstatics[@]:+${nonstatics[@]}}"; do
      local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
      "${GO}" build -o "${outfile}" \
        "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${binary}"
      kube::log::progress "*"
    done
    kube::log::progress "\n"
  else
    # Use go install.
    if [[ "${#nonstatics[@]}" != 0 ]]; then
      "${GO}" install "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${nonstatics[@]:+${nonstatics[@]}}"
    fi
    if [[ "${#statics[@]}" != 0 ]]; then
      CGO_ENABLED=0 "${GO}" install -installsuffix cgo "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${statics[@]:+${statics[@]}}"
    fi
  fi
  # Build test binaries, skipping any that 'teststale' reports as fresh.
  for test in "${tests[@]:+${tests[@]}}"; do
    local outfile=$(kube::golang::output_filename_for_binary "${test}" \
      "${platform}")
    local testpkg="$(dirname ${test})"
    # Staleness check always happens on the host machine, so we don't
    # have to locate the `teststale` binaries for the other platforms.
    # Since we place the host binaries in `$KUBE_GOPATH/bin`, we can
    # assume that the binary exists there, if it exists at all.
    # Otherwise, something has gone wrong with building the `teststale`
    # binary and we should safely proceed building the test binaries
    # assuming that they are stale. There is no good reason to error
    # out.
    if test -x "${KUBE_GOPATH}/bin/teststale" && ! "${KUBE_GOPATH}/bin/teststale" -binary "${outfile}" -package "${testpkg}"
    then
      continue
    fi
    # `go test -c` below directly builds the binary. It builds the packages,
    # but it never installs them. `go test -i` only installs the dependencies
    # of the test, but not the test package itself. So neither `go test -c`
    # nor `go test -i` installs, for example, test/e2e.a. And without that,
    # doing a staleness check on k8s.io/kubernetes/test/e2e package always
    # returns true (always stale). And that's why we need to install the
    # test package.
    "${GO}" install "${goflags[@]:+${goflags[@]}}" \
      -gcflags "${gogcflags}" \
      -ldflags "${goldflags}" \
      "${testpkg}"
    mkdir -p "$(dirname ${outfile})"
    "${GO}" test -i -c \
      "${goflags[@]:+${goflags[@]}}" \
      -gcflags "${gogcflags}" \
      -ldflags "${goldflags}" \
      -o "${outfile}" \
      "${testpkg}"
  done
}
# Return approximate physical memory available in gigabytes.
kube::golang::get_physmem() {
  local kb bytes
  # Linux kernel >= 3.14 exposes MemAvailable (value in kB).
  if kb=$(grep MemAvailable /proc/meminfo | awk '{ print $2 }'); then
    echo $(( kb / 1048576 ))
    return
  fi
  # Older Linux: fall back to total memory (also in kB).
  if kb=$(grep MemTotal /proc/meminfo | awk '{ print $2 }'); then
    echo $(( kb / 1048576 ))
    return
  fi
  # OS X, in bytes. Note that get_physmem, as used, should only ever
  # run in a Linux container (because it's only used in the multiple
  # platform case, which is a Dockerized build), but this is provided
  # for completeness.
  if bytes=$(sysctl -n hw.memsize 2>/dev/null); then
    echo $(( bytes / 1073741824 ))
    return
  fi
  # If we can't infer it, just give up and assume a low memory system
  echo 1
}
# Build binaries targets specified
#
# Input:
#   $@ - targets and go flags.  If no targets are set then all binaries targets
#     are built.
#   KUBE_BUILD_PLATFORMS - Incoming variable of targets to build for.  If unset
#     then just the host architecture is built.
kube::golang::build_binaries() {
  # Create a sub-shell so that we don't pollute the outer environment
  (
    # Check for `go` binary and set ${GOPATH}.
    kube::golang::setup_env
    V=2 kube::log::info "Go version: $(go version)"
    local host_platform
    host_platform=$(kube::golang::host_platform)
    # Use eval to preserve embedded quoted strings.
    local goflags goldflags gogcflags
    eval "goflags=(${KUBE_GOFLAGS:-})"
    goldflags="${KUBE_GOLDFLAGS:-} $(kube::version::ldflags)"
    gogcflags="${KUBE_GOGCFLAGS:-}"
    # Split the arguments into build targets and flags for go.
    local use_go_build
    local -a targets=()
    local arg
    for arg; do
      if [[ "${arg}" == "--use_go_build" ]]; then
        use_go_build=true
      elif [[ "${arg}" == -* ]]; then
        # Assume arguments starting with a dash are flags to pass to go.
        goflags+=("${arg}")
      else
        targets+=("${arg}")
      fi
    done
    if [[ ${#targets[@]} -eq 0 ]]; then
      targets=("${KUBE_ALL_TARGETS[@]}")
    fi
    # Unquoted on purpose: KUBE_BUILD_PLATFORMS is a space-separated list.
    local -a platforms=(${KUBE_BUILD_PLATFORMS:-})
    if [[ ${#platforms[@]} -eq 0 ]]; then
      platforms=("${host_platform}")
    fi
    local binaries
    binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))
    # Build platforms in parallel only when enough physical memory is free.
    local parallel=false
    if [[ ${#platforms[@]} -gt 1 ]]; then
      local gigs
      gigs=$(kube::golang::get_physmem)
      if [[ ${gigs} -ge ${KUBE_PARALLEL_BUILD_MEMORY} ]]; then
        kube::log::status "Multiple platforms requested and available ${gigs}G >= threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in parallel"
        parallel=true
      else
        kube::log::status "Multiple platforms requested, but available ${gigs}G < threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in serial"
        parallel=false
      fi
    fi
    # First build the toolchain before building any other targets
    kube::golang::build_kube_toolchain
    kube::log::status "Generating bindata:" "${KUBE_BINDATAS[@]}"
    for bindata in ${KUBE_BINDATAS[@]}; do
      # Only try to generate bindata if the file exists, since in some cases
      # one-off builds of individual directories may exclude some files.
      if [[ -f "${KUBE_ROOT}/${bindata}" ]]; then
        go generate "${goflags[@]:+${goflags[@]}}" "${KUBE_ROOT}/${bindata}"
      fi
    done
    if [[ "${parallel}" == "true" ]]; then
      kube::log::status "Building go targets for {${platforms[*]}} in parallel (output will appear in a burst when complete):" "${targets[@]}"
      # Each platform builds in a background subshell; output is captured to
      # a per-platform file and replayed after all jobs finish.
      local platform
      for platform in "${platforms[@]}"; do (
          kube::golang::set_platform_envs "${platform}"
          kube::log::status "${platform}: go build started"
          kube::golang::build_binaries_for_platform ${platform} ${use_go_build:-}
          kube::log::status "${platform}: go build finished"
        ) &> "/tmp//${platform//\//_}.build" &
      done
      # Wait for every background job, counting failures.
      local fails=0
      for job in $(jobs -p); do
        wait ${job} || let "fails+=1"
      done
      for platform in "${platforms[@]}"; do
        cat "/tmp//${platform//\//_}.build"
      done
      # Exits the sub-shell with the number of failed platform builds.
      exit ${fails}
    else
      for platform in "${platforms[@]}"; do
        kube::log::status "Building go targets for ${platform}:" "${targets[@]}"
        (
          kube::golang::set_platform_envs "${platform}"
          kube::golang::build_binaries_for_platform ${platform} ${use_go_build:-}
        )
      done
    fi
  )
}
|
ahakanbaba/kubernetes
|
hack/lib/golang.sh
|
Shell
|
apache-2.0
| 23,906 |
# Prepend the PyPy install location so its binaries take precedence on PATH.
PATH=/usr/local/pypy/bin:$PATH
|
gavioto/travis-cookbooks
|
cookbooks/pypy/files/default/etc/profile.d/pypy.sh
|
Shell
|
mit
| 31 |
#!/bin/bash
# conda build script: relocate the unpacked tool files into the package
# prefix and run the bundled setup script.
mv * "$PREFIX/bin/"
mkdir -p "$PREFIX/home"
# setup.sh writes into $HOME; point HOME inside the package prefix so the
# build stays self-contained.
export HOME="$PREFIX/home"
sh "$PREFIX/bin/setup.sh"
|
JenCabral/bioconda-recipes
|
recipes/entrez-direct/build.sh
|
Shell
|
mit
| 110 |
#!/bin/sh
test_description='git bugreport'
# Declare that this test passes under leak-sanitizer builds.
TEST_PASSES_SANITIZE_LEAK=true
. ./test-lib.sh
# Headers "[System Info]" will be followed by a non-empty line if we put some
# information there; we can make sure all our headers were followed by some
# information to check if the command was successful.
HEADER_PATTERN="^\[.*\]$"
# Read a bug report on stdin; echo each section header ("[...]") seen and
# return 1 if any header is immediately followed by a blank line, i.e. the
# section carries no information.
check_all_headers_populated () {
	while read -r line
	do
		# Match the line's TEXT against the header pattern. The original
		# passed "$line" to grep as a *filename*, so it never matched and
		# the function silently succeeded on any input.
		if printf '%s\n' "$line" | grep -q "$HEADER_PATTERN"
		then
			echo "$line"
			read -r nextline
			if test -z "$nextline"; then
				return 1
			fi
		fi
	done
}
# Every [Section] header in a generated report must carry information.
test_expect_success 'creates a report with content in the right places' '
test_when_finished rm git-bugreport-check-headers.txt &&
git bugreport -s check-headers &&
check_all_headers_populated <git-bugreport-check-headers.txt
'
# Refuse to clobber an existing report of the same name.
test_expect_success 'dies if file with same name as report already exists' '
test_when_finished rm git-bugreport-duplicate.txt &&
>>git-bugreport-duplicate.txt &&
test_must_fail git bugreport --suffix duplicate
'
test_expect_success '--output-directory puts the report in the provided dir' '
test_when_finished rm -fr foo/ &&
git bugreport -o foo/ &&
test_path_is_file foo/git-bugreport-*
'
# Bad flags must abort before any report file is written.
test_expect_success 'incorrect arguments abort with usage' '
test_must_fail git bugreport --false 2>output &&
test_i18ngrep usage output &&
test_path_is_missing git-bugreport-*
'
test_expect_success 'runs outside of a git dir' '
test_when_finished rm non-repo/git-bugreport-* &&
nongit git bugreport
'
test_expect_success 'can create leading directories outside of a git dir' '
test_when_finished rm -fr foo/bar/baz &&
nongit git bugreport -o foo/bar/baz
'
# Only executable, non-sample hooks should be listed in the report.
test_expect_success 'indicates populated hooks' '
test_when_finished rm git-bugreport-hooks.txt &&
test_when_finished rm -fr .git/hooks &&
rm -fr .git/hooks &&
mkdir .git/hooks &&
for hook in applypatch-msg prepare-commit-msg.sample
do
write_script ".git/hooks/$hook" <<-EOF || return 1
echo "hook $hook exists"
EOF
done &&
git bugreport -s hooks &&
grep applypatch-msg git-bugreport-hooks.txt &&
! grep prepare-commit-msg git-bugreport-hooks.txt
'
test_done
|
abg1979/git
|
t/t0091-bugreport.sh
|
Shell
|
gpl-2.0
| 2,114 |
#!/usr/bin/env bash
set -e -o pipefail
# Load the shared CI environment (defines $TMP, travis_fold, ...) once.
[[ -z "$NGIO_ENV_DEFS" ]] && . ./scripts/env-set.sh > /dev/null
# -v enables the per-line grep detail listing below.
if [[ "x$1" == "x-v" ]]; then VERBOSE=1; shift; fi
SITE=./www
CHECK_FOR=bad-code-excerpt
LOGFILE_PREFIX=$CHECK_FOR-log
LOGFILE_FULL=$TMP/$LOGFILE_PREFIX-full.txt
LOGFILE=$TMP/$LOGFILE_PREFIX.txt
SKIPFILE_BASE=$CHECK_FOR-skip-patterns.txt
SKIPFILE_SRC=./scripts/config/$SKIPFILE_BASE
SKIPFILE=$TMP/$SKIPFILE_BASE
if [[ ! -d $SITE ]]; then
  echo "Missing site folder $SITE"
  exit 1;
fi
travis_fold start $CHECK_FOR
echo "Searching site for HTML files containing bad code excerpts (BAD FILENAME)."
echo
# Collect every generated HTML page that still contains the "BAD FILENAME"
# placeholder left behind by a failed code-excerpt substitution.
if [[ -n "$VERBOSE" ]]; then
  travis_fold start $CHECK_FOR-details
  echo "Full file list with grep details:"
  find $SITE -type f -name "*.html" -exec grep -Hne "BAD FILENAME" {} \; | tee $LOGFILE_FULL
  travis_fold end $CHECK_FOR-details
  echo
else
  echo "Full file list:"
  find $SITE -type f -name "*.html" -exec grep -le "BAD FILENAME" {} \; | tee $LOGFILE_FULL
fi
echo
echo "Skip patterns for paths of files known to have issues ($SKIPFILE_SRC):"
# Strip trailing whitespace and '#' comments from the skip-pattern source.
perl -pe 's/(\s+|\s*#.*)$/\n/g' $SKIPFILE_SRC | \
  # Remove blank lines \
  grep '.' > $SKIPFILE
cat $SKIPFILE
echo
echo "File list excluding those matching skip patterns:"
grep -v -E -f $SKIPFILE $LOGFILE_FULL | tee $LOGFILE || true
# Fail the build only when unskipped matches remain.
if [[ ! -s $LOGFILE ]]; then
  echo "No matches, all is good!"
  travis_fold end $CHECK_FOR
else
  exit 1;
fi
|
IdeaBlade/angular.io
|
scripts/check-docs.sh
|
Shell
|
mit
| 1,446 |
#!/usr/bin/env bash
#
# Configure.sh Generates interactively a config.h from config.in
#
# net-tools A collection of programs that form the base set of the
# NET-3 Networking Distribution for the LINUX operating
# system.
#
# Usage: Install.sh [--nobackup] [--test]
#
# Version: Install.sh 1.65 (1996-01-12)
#
# Authors: Fred N. van Kempen, <[email protected]>
# Johannes Grosen, <[email protected]>
# Copyright 1988-1993 MicroWalt Corporation
#
# Modified:
# {1.65} Bernd eckes Eckenfels <[email protected]>
# some layout cleanups, slattach/plipconfig removed.
# --test for testinstallations added.
#
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General
# Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at
# your option) any later version.
#
#
# Make sure we're really running bash.
#
# I would really have preferred to write this script in a language with
# better string handling, but alas, bash is the only scripting language
# that I can be reasonable sure everybody has on their Linux machine.
#
# Output files: generated C header and make fragment.
CONFIG=config.h
MAKECONFIG=config.make
# The script uses bash-only features; refuse to continue under another shell.
[ -z "$BASH" ] && { echo "Configure requires bash" 1>&2; exit 1; }
# Disable filename globbing once and for all.
# Enable function cacheing.
set -f -h
# set up reading of config file
if [ "$#" != "1" ] || [ ! -f "$1" ]; then
  echo "usage: $0 configfile" 1>&2
  exit 1
fi
# Keep the template open on fd 7; the main loop reads it via
# $config_fd_redir so that readln can still read answers from stdin.
exec 7<$1
config_fd_redir='<&7'
#
# readln reads a line into $ans.
#
# readln prompt default
#
function readln()
{
	# Print the prompt ($1) and read one line into $ans; when the user
	# enters nothing, fall back to the default ($2). Exits on EOF.
	echo -n "$1"
	IFS='@' read ans || exit 1
	if [ -z "$ans" ]; then
		ans=$2
	fi
}
# bool processes a boolean argument
#
# bool tail
#
function bool()
{
	# Slimier hack to get bash to rescan a line:
	# $1 holds "'prompt' SYMBOL default"; re-split it into $1 $2 $3.
	eval "set -- $1"
	ans=""
	# Keep prompting until the user answers y or n.
	until [ "$ans" = "y" ] || [ "$ans" = "n" ]; do
		readln "$1 ($2) [$3] " "$3"
	done
	# Record the answer in both generated files.
	if [ "$ans" = "y" ]; then
		echo "#define $2 1" >>${CONFIG}
		echo "$2=1" >>${MAKECONFIG}
	else
		echo "#define $2 0" >>${CONFIG}
		echo "# $2=0" >> ${MAKECONFIG}
	fi
	raw_input_line="bool '$1' $2 $ans"
	eval "$2=$ans"
}
# int processes an integer argument
#
# int tail
#
function int()
{
	# Slimier hack to get bash to rescan a line:
	# $1 holds "'prompt' SYMBOL default"; re-split it into $1 $2 $3.
	eval "set -- $1"
	# Prompt until the answer round-trips through arithmetic unchanged,
	# i.e. it is a plain integer.
	ans="x"
	while [ "$((ans+0))" != "$ans" ]; do
		readln "$1 ($2) [$3] " "$3"
	done
	echo "#define $2 ($ans)" >>${CONFIG}
	raw_input_line="int '$1' $2 $ans"
	eval "$2=$ans"
}
#
# Make sure we start out with a clean slate.
#
> config.new
> ${CONFIG}
> ${MAKECONFIG}
# Conditional-inclusion state: 'branch' is t while the current if-branch is
# active; 'stack' remembers the branch states of enclosing ifs.
stack=''
branch='t'
# Read the template line by line from fd 7 (opened above); IFS='@' keeps
# ordinary whitespace intact inside raw_input_line.
while IFS='@' eval read raw_input_line ${config_fd_redir}
do
	# Slimy hack to get bash to rescan a line.
	read cmd rest <<-END_OF_COMMAND
	$raw_input_line
	END_OF_COMMAND
	if [ "$cmd" = "*" ]; then
		# '*' lines are comments; copy them into the header as a C comment.
		if [ "$branch" = "t" ]; then
			echo "$raw_input_line"
			# echo "# $rest" >>$CONFIG
			if [ "$prevcmd" != "*" ]; then
				echo >>${CONFIG}
				echo "/* $rest" >>${CONFIG}
			else
				echo " * $rest" >>${CONFIG}
			fi
			prevcmd="*"
		fi
	else
		# Close any open C comment before processing a directive.
		[ "$prevcmd" = "*" ] && echo " */" >>${CONFIG}
		prevcmd=""
		case "$cmd" in
		=) [ "$branch" = "t" ] && echo "$rest" >>${CONFIG};;
		:) [ "$branch" = "t" ] && echo "$raw_input_line" ;;
		int) [ "$branch" = "t" ] && int "$rest" ;;
		bool) [ "$branch" = "t" ] && bool "$rest" ;;
		exec) [ "$branch" = "t" ] && ( sh -c "$rest" ) ;;
		if) stack="$branch $stack"
			if [ "$branch" = "t" ] && eval "$rest"; then
				branch=t
			else
				branch=f
			fi ;;
		else) if [ "$branch" = "t" ]; then
				branch=f
			else
				read branch rest <<-END_OF_STACK
				$stack
				END_OF_STACK
			fi ;;
		fi) [ -z "$stack" ] && echo "Error! Extra fi." 1>&2
			read branch stack <<-END_OF_STACK
			$stack
			END_OF_STACK
			;;
		esac
	fi
	echo "$raw_input_line" >>config.new
done
# Close a trailing C comment and report unbalanced ifs.
[ "$prevcmd" = "*" ] && echo " */" >>${CONFIG}
[ -z "$stack" ] || echo "Error! Unterminated if." 1>&2
mv config.new config.status
exit 0
|
kidmaple/CoolWall
|
user/net-tools/configure.sh
|
Shell
|
gpl-2.0
| 3,931 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. $(dirname ${BASH_SOURCE})/../util.sh
# Look up the ClusterIP of the daemon-demo service in the 'demos' namespace.
IP=$(kubectl --namespace=demos get svc daemon-demo \
    -o go-template='{{.spec.clusterIP}}')
# Curl the service once per second from $SSH_NODE; 'run' comes from util.sh.
run "gcloud compute ssh --zone=us-central1-b $SSH_NODE --command '\\
while true; do \\
curl --connect-timeout 1 -s $IP && echo || echo \"(timeout)\"; \\
sleep 1; \\
done \\
'"
|
eastbanctech/kubernetes-contrib
|
micro-demos/daemon_sets/split1_hit_svc.sh
|
Shell
|
apache-2.0
| 944 |
#!/bin/bash
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -ex
# Usage: make-video.sh <output-file> [gource args...]
# First argument is the output video path; everything else is forwarded to
# gource.sh, whose PPM frame stream is encoded to H.264 by avconv.
dst=$1
shift
# Quote all expansions so paths with spaces survive, and forward the
# remaining arguments with "$@" to preserve word boundaries.
"$(dirname "$0")"/gource.sh \
  --disable-progress \
  --stop-at-end \
  --output-ppm-stream - \
  "$@" | \
avconv \
  -y \
  -r 60 \
  -f image2pipe \
  -vcodec ppm \
  -i - \
  -vcodec libx264 \
  "$dst"
|
jcanizales/grpc
|
tools/gource/make-video.sh
|
Shell
|
bsd-3-clause
| 1,882 |
#
# This helper is for templates using GNU configure scripts.
#
do_configure() {
	# Default to the in-tree ./configure script. ${configure_args} is
	# expanded unquoted on purpose: it carries multiple options.
	if [ -z "$configure_script" ]; then
		configure_script=./configure
	fi
	${configure_script} ${configure_args}
}
do_build() {
	# Default to plain make; all *_args variables are intentionally
	# expanded unquoted so they word-split into separate arguments.
	if [ -z "$make_cmd" ]; then
		make_cmd=make
	fi
	${make_cmd} ${makejobs} ${make_build_args} ${make_build_target}
}
do_check() {
	# With fully-default settings, probe the Makefile first:
	# 'make -q <target>' exits 2 when the target does not exist at all.
	if [ -z "$make_cmd" ] && [ -z "$make_check_target" ]; then
		if make -q check 2>/dev/null; then
			: # target exists and is up to date; still run it below
		elif [ $? -eq 2 ]; then
			msg_warn 'No target to "make check".\n'
			return 0
		fi
	fi
	: ${make_cmd:=make}
	: ${make_check_target:=check}
	${make_cmd} ${make_check_args} ${make_check_target}
}
do_install() {
	# Default command and target; DESTDIR stages the install tree.
	if [ -z "$make_cmd" ]; then
		make_cmd=make
	fi
	if [ -z "$make_install_target" ]; then
		make_install_target=install
	fi
	${make_cmd} DESTDIR=${DESTDIR} ${make_install_args} ${make_install_target}
}
|
NeoChontrix/void-packages
|
common/build-style/gnu-configure.sh
|
Shell
|
bsd-2-clause
| 743 |
#
# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 4990825
# @run shell jstatGcMetaCapacityOutput1.sh
# @summary Test that output of 'jstat -gcmetacapacity 0' has expected line counts
# Source the shared jvmstat test helpers (provides setup and verify_os).
. ${TESTSRC-.}/../../jvmstat/testlibrary/utils.sh
setup
verify_os
JSTAT="${TESTJAVA}/bin/jstat"
# Force PerfData and English output, sample -gcmetacapacity once (interval 0),
# and validate the output's line structure with the companion awk script.
${JSTAT} -J-XX:+UsePerfData -J-Duser.language=en -gcmetacapacity 0 2>&1 | awk -f ${TESTSRC}/gcMetaCapacityOutput1.awk
|
frohoff/jdk8u-jdk
|
test/sun/tools/jstat/jstatGcMetaCapacityOutput1.sh
|
Shell
|
gpl-2.0
| 1,395 |
#!/usr/bin/env bash
# Remove the file given as the first argument. '--' stops option parsing so
# filenames beginning with a dash are not misread as rm options.
rm -- "$1"
|
tsdmgz/ansible
|
test/integration/targets/script/files/remove_afile.sh
|
Shell
|
gpl-3.0
| 28 |
#!/bin/sh -
#
# $OpenBSD: newvers.sh,v 1.10 2008/03/09 12:03:03 sobrado Exp $
# $NetBSD: newvers.sh,v 1.3 1996/06/14 20:03:04 cgd Exp $
#
# Copyright (c) 1984, 1986, 1990, 1993
# The Regents of the University of California. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the University nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# @(#)newvers.sh 8.1 (Berkeley) 4/20/94
# Gather build metadata: building user, host, and timestamp.
u=${USER-root} h=`hostname` t=`date`
# Revision string: third whitespace-separated field of the file passed as $1.
r=`awk ' { print $3 ; exit } ' < $1`
echo "char bootprog_name[] = \"OpenBSD/alpha boot ${r}\";" > vers.c
#echo "char bootprog_rev[] = \"${r}\";" >> vers.c
#echo "char bootprog_date[] = \"${t}\";" >> vers.c
#echo "char bootprog_maker[] = \"${u}@${h}\";" >> vers.c
|
orumin/openbsd-efivars
|
arch/alpha/stand/boot/newvers.sh
|
Shell
|
isc
| 2,055 |
#!/bin/sh
set -e
# Let's automate the install some Firefox addons
# We used the instructions here to prepare our addons: http://askubuntu.com/a/73480
# Then we zipped the contents and had Packer transfer them to the /tmp directory of our VM
# Untar the file Packer uploaded for us
# (extracted straight into Iceweasel's system-wide extensions directory).
tar zxf /tmp/add-ons.tar.gz -C /usr/lib/iceweasel/browser/extensions
|
jameswhiteman/packer-kali
|
scripts/software.sh
|
Shell
|
isc
| 351 |
#!/bin/bash
# NOTE(review): this script mixes bash with raw Expect commands (the
# spawn/expect/send lines below). As plain bash those lines are not valid —
# several do not even parse (unquoted parentheses) — so this section needs to
# be wrapped in an 'expect' script to work. Comments only; code unchanged.
sudo -s
xterm -e 'sudo passwd' & read -n 1 -p "Press Enter to continue once you have set a new username and password" passwdchanged
sudo apt-get update
# NOTE(review): apt-get's assume-yes flag is lowercase '-y'; '-Y' is not a
# valid option — TODO confirm and fix.
sudo apt-get install expect -Y
sudo apt-get install openvpn -Y
cp -r /usr/share/doc/openvpn/examples/easy-rsa/2.0 /etc/openvpn/easy-rsa
cd /etc/openvpn/easy-rsa
xterm -e 'nano vars' & read -n 1 -p "Press Enter when you have inputed the value as shown here: http://15809-presscdn-0-93.pagely.netdna-cdn.com/wp-content/uploads/2016/02/MTIyMzAyMDUwNDQzODgxNzUz.png"
cd /etc/openvpn/easy-rsa
source ./vars
./clean-all
# NOTE(review): everything from here to the final 'send "y"' is Expect
# syntax, not shell.
spawn ./build-ca
expect Country Name (2 letter code) [US]:
send "."
expect State or Province Name (full name) [CA]:
send "."
expect Locality Name (eg, city) [SanFrancisco]:
send "."
expect Organization Name (eg, company) [Fort-Funsion]:
send "."
expect Organizational Unit Name (eg, section) [changeme]:
send "."
expect Common Name (eg, your name or your servers hostname) [changeme]:
send "."
expect Name [changeme]:
send "."
expect Email Address [[email protected]]:
send "."
read -p "Input a Name for your Server" "servername"
spawn ./build-key-server "$servername"
expect Commmon Name:
send "$servername"
expect A challenge password?:
send ""
expect 1 out of 1 certificate requests certified, commit? [y/n]:
send "y"
echo "Time to create clients for your devices!"
cd ~
mkdir tmp
cd tmp
wget https://raw.githubusercontent.com/MaxDistructo/ShellScripts/master/OpenVPNUsers.sh
chmod 755 OpenVPNUsers.sh
./OpenVPNUsers.sh
|
MaxDistructo/ShellScripts
|
linux-shell/InstallOpenVPN.sh
|
Shell
|
mit
| 1,500 |
#!/bin/sh
set -e
# Rebuild the initrd archive and the stripped kernel image first.
./mkinitrd.sh
./strip-bin.sh
cd ..
# Recreate the ISO staging tree from scratch.
rm -rf isodir/boot/*
mkdir -p isodir
mkdir -p isodir/boot
mkdir -p isodir/boot/grub
# NOTE(review): this removes isodir/LudOS.iso, but grub-mkrescue below writes
# to build/iso/LudOS.iso — one of the two paths looks stale; TODO confirm.
rm -f isodir/LudOS.iso
cp build/bin/LudOS.bin isodir/boot/LudOS.bin
cp build/bin/stripped.bin isodir/boot/stripped.bin
cp build/bin/initrd.tar isodir/boot/initrd.tar
# Write the GRUB menu entry booting the kernel with the initrd module.
cat > isodir/boot/grub/grub.cfg << EOF
menuentry "LudOS" {
multiboot /boot/LudOS.bin loglevel=debug
module /boot/initrd.tar initrd
}
EOF
grub-mkrescue -o build/iso/LudOS.iso isodir
|
LugosFingite/LudOS
|
tools/iso.sh
|
Shell
|
mit
| 499 |
#!/usr/bin/env bash
# Integration tests for the test-workers controller.
# -u/-o pipefail but NO -e: the exit codes of the commands under test are
# inspected manually via the assert helper below.
set -uo pipefail
IFS=$'\n\t'
# DIR: directory containing this script; ROOT: repository root (two levels up).
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}" )" && pwd)"
ROOT="$(dirname "$(dirname "$DIR")")"
# Compare actual ($1) to expected ($2); print '.' on success, otherwise emit
# the message ($3) and captured output ($4) on stderr and abort the run.
assert() {
  if [[ "$1" == "$2" ]]; then
    echo -n '.'
  else
    echo -e "\n\e[31m$3\e[0m" >&2
    echo -e "Output was\n$4" >&2
    exit 1
  fi
}
# Run the test-workers controller under phpdbg (coverage-capable SAPI),
# forwarding all arguments.
function controller {
  phpdbg -qrr "$ROOT"/bin/test-workers "$@"
}
# Success example must exit 0 (original message wrongly said "failed test").
OUTPUT="$(controller "$ROOT"/tests/examples/success.php)"
assert "$?" "0" "Exit code with successful test should be zero" "$OUTPUT"
# Failure example must exit non-zero.
OUTPUT="$(controller "$ROOT"/tests/examples/failure.php)"
assert "$?" "1" "Exit code with failed test should be non-zero" "$OUTPUT"
# Setup script run directly must match the raw fixture.
diff <(php "$ROOT"/tests/examples/with-setup.php) <(cat "$ROOT"/tests/fixtures/setup-raw.txt)
assert "$?" "0" "Running test with setup directly failed" ""
# Setup preloaded through the controller must match the preload fixture
# (message de-duplicated from the previous assertion).
diff <(controller --setup="$ROOT"/tests/examples/setup.php "$ROOT"/tests/examples/with-setup.php "$ROOT"/tests/examples/with-setup.php | head -1 ) <(cat "$ROOT"/tests/fixtures/setup-preload.txt)
assert "$?" "0" "Running test with setup through the controller failed" ""
# A missing setup file must make the controller exit non-zero.
OUTPUT="$(controller --setup=foo tests/*)"
assert "$?" "1" "Exit code with missing setup file should be non-zero" "$OUTPUT"
printf "\n"
|
Mikulas/test-workers
|
tests/cases/controller.sh
|
Shell
|
mit
| 1,129 |
#!/bin/bash
# Select the test runner based on the CKAN version under test.
# CKANVERSION is exported by the CI configuration ('master' or e.g. '2.9').
set -e
# Quote the expansion: the original unquoted '[ $CKANVERSION == ... ]' broke
# the test expression when the variable was empty.
if [ "$CKANVERSION" == 'master' ]
then
    # Treat master as newer than any released minor version.
    export CKAN_MINOR_VERSION=100
else
    # Strip everything up to the last dot: '2.9' -> '9'.
    export CKAN_MINOR_VERSION=${CKANVERSION##*.}
fi
if (( CKAN_MINOR_VERSION >= 9 ))
then
    # CKAN >= 2.9 runs the pytest suite.
    pytest --ckan-ini=subdir/test.ini --cov=ckanext.archiver ckanext/archiver/tests
else
    # Older CKAN still runs the nose suite.
    nosetests --ckan --nologcapture --with-pylons=subdir/test-nose.ini --with-coverage --cover-package=ckanext.archiver --cover-inclusive --cover-erase --cover-tests ckanext/archiver/tests/nose
fi
|
ckan/ckanext-archiver
|
bin/travis-run.bash
|
Shell
|
mit
| 474 |
#!/usr/bin/env bash
# Serve the site container on port 4000, mounting the current directory as
# /src. The mount path is quoted (and backticks replaced by $()): the original
# unquoted `pwd` broke for working directories containing spaces.
docker run --rm -p 4000:4000 -v "$(pwd)":/src site:serve "$@"
|
mmorse1217/mmorse1217.github.com
|
docker_serve.sh
|
Shell
|
mit
| 79 |
#!/bin/bash
#
# This script is executed with TravisCI (see .travis.yml file) on the server using SSH
#
# Required :
# * pm2
# * node
# * other dependencies referenced in the README.md file
#
# Run from the script's own directory (quoted so paths with spaces work; the
# original used an unquoted backtick substitution).
cd "$(dirname "$0")" || exit 1
# Pass dynamic text as printf *arguments*, never as the format string: the
# original interpolated `git show` output into the format, so a stray '%'
# could corrupt the log entry.
printf '%s : Trying ...\n' "$(date +%Y-%m-%d)" >> deploy.log
#PM2
echo "Checking PM2 ..."
if npm list -g --depth=0 | grep pm2
then
  echo "PM2 already installed."
else
  echo "Installing PM2 ..."
  # Prefer yarn when available, fall back to npm.
  if which yarn > /dev/null; then
    yarn global add pm2
  else
    npm install -g pm2
  fi
  echo "Installed PM2 !"
fi
## Install dependencies
echo "Installing dependencies ..."
if which yarn > /dev/null; then
  yarn
else
  npm install
fi
echo "Installed dependencies !"
## Build
echo "Building ..."
npm run build
echo "Building : done !"
## Run with pm2
echo "Running PM2 ..."
pm2 startOrRestart ecosystem.config.js --env production
echo "Deployed successfully !"
printf '%s : \n%s\n\n' "$(date +%Y-%m-%d)" "$(git show --name-status)" >> deploy.log
|
league-of-legends-devs/feeder.lol-item-sets-generator.org
|
deploy.sh
|
Shell
|
mit
| 932 |
# Recreate the flask container: stop and remove any previous instance.
# NOTE(review): if 'docker stop' fails (e.g. no such container), '&&' skips
# the 'rm' too -- presumably intentional best-effort cleanup; verify.
docker stop flask && docker rm flask
# Run the flask server image with sshd in the foreground; container port 22
# gets a random host port, 8080 is published as host 8080.
docker run --name flask -d -p 22 -p 8080:8080 sivakumarkailasam/flask_server /usr/sbin/sshd -D
# Start an interactive fabric container linked to the flask container.
docker run --link flask:flask -i -t sivakumarkailasam/fabric_server
|
sivakumar-kailasam/fabric-demo
|
run.sh
|
Shell
|
mit
| 201 |
# Complete top-level 'hr' commands from the output of 'hr commands'
# (one candidate per line).
_hr_commands() {
  local cmds
  local -a commands
  cmds="$(hr commands)"
  # Split on newlines only (zsh (ps:\n:) parameter expansion flags).
  commands=(${(ps:\n:)cmds})
  _wanted command expl "hr command" compadd -a commands
}
# Complete subcommands by asking 'hr completions <cmd> <args...>' for
# candidates; fall back to default (file) completion when none are offered.
_hr_subcommands() {
  local cmd subcmds
  local -a commands
  cmd="${words[2]}"
  # Forward the words typed so far: from the 3rd word up to (excluding) the
  # word under the cursor.
  subcmds="$(hr completions $cmd ${words[3,$(($CURRENT - 1))]})"
  if [ -n "$subcmds" ]; then
    commands=(${(ps:\n:)subcmds})
    _wanted subcommand expl "hr $cmd subcommand" compadd -a commands
  else
    _default
  fi
}
# Entry point: the 2nd word completes a command, later words a subcommand.
_hr() {
  case $CURRENT in
  2) _hr_commands ;;
  *) _hr_subcommands ;;
  esac
}
# Register this completer for the 'hr' command.
compdef _hr hr
|
hashrocket/hr
|
completions/hr.zsh
|
Shell
|
mit
| 568 |
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2014:0148
#
# Security announcement date: 2014-02-10 17:32:17 UTC
# Script generation date: 2016-01-11 19:14:44 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - spacewalk-base-minimal:2.0.3-19.el6sat.noarch
# - spacewalk-dobby:2.0.3-19.el6sat.noarch
# - satellite-branding:5.6.0.23-1.el6sat.noarch
# - spacewalk-base:2.0.3-19.el6sat.noarch
# - spacewalk-base-minimal-config:2.0.3-19.el6sat.noarch
# - spacewalk-grail:2.0.3-19.el6sat.noarch
# - spacewalk-html:2.0.3-19.el6sat.noarch
# - spacewalk-java:2.0.2-58.el6sat.noarch
# - spacewalk-java-config:2.0.2-58.el6sat.noarch
# - spacewalk-java-lib:2.0.2-58.el6sat.noarch
# - spacewalk-java-oracle:2.0.2-58.el6sat.noarch
# - spacewalk-java-postgresql:2.0.2-58.el6sat.noarch
# - spacewalk-pxt:2.0.3-19.el6sat.noarch
# - spacewalk-sniglets:2.0.3-19.el6sat.noarch
# - spacewalk-taskomatic:2.0.2-58.el6sat.noarch
#
# Last versions recommended by security team:
# - spacewalk-base-minimal:2.3.2-27.el6sat.noarch
# - spacewalk-dobby:2.3.2-27.el6sat.noarch
# - satellite-branding:5.5.0.9-1.el6sat.noarch
# - spacewalk-base:2.3.2-27.el6sat.noarch
# - spacewalk-base-minimal-config:2.3.2-27.el6sat.noarch
# - spacewalk-grail:2.3.2-27.el6sat.noarch
# - spacewalk-html:2.3.2-27.el6sat.noarch
# - spacewalk-java:2.3.8-103.el6sat.noarch
# - spacewalk-java-config:2.3.8-103.el6sat.noarch
# - spacewalk-java-lib:2.3.8-103.el6sat.noarch
# - spacewalk-java-oracle:2.3.8-103.el6sat.noarch
# - spacewalk-java-postgresql:2.3.8-103.el6sat.noarch
# - spacewalk-pxt:2.3.2-27.el6sat.noarch
# - spacewalk-sniglets:2.3.2-27.el6sat.noarch
# - spacewalk-taskomatic:2.3.8-103.el6sat.noarch
#
# CVE List:
# - CVE-2012-6149
# - CVE-2013-1869
# - CVE-2013-1871
# - CVE-2013-4415
#
# More details:
# - https://www.cyberwatch.fr/notices/RHSA-2014:0148
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Install the fixed package versions listed in the header above.
# A single POSIX-compatible loop replaces 15 copy-pasted install lines
# (no arrays: sibling scripts in this tree run under /bin/sh).
for pkg in \
  spacewalk-base-minimal-2.3.2 \
  spacewalk-dobby-2.3.2 \
  satellite-branding-5.5.0.9 \
  spacewalk-base-2.3.2 \
  spacewalk-base-minimal-config-2.3.2 \
  spacewalk-grail-2.3.2 \
  spacewalk-html-2.3.2 \
  spacewalk-java-2.3.8 \
  spacewalk-java-config-2.3.8 \
  spacewalk-java-lib-2.3.8 \
  spacewalk-java-oracle-2.3.8 \
  spacewalk-java-postgresql-2.3.8 \
  spacewalk-pxt-2.3.2 \
  spacewalk-sniglets-2.3.2 \
  spacewalk-taskomatic-2.3.8
do
  sudo yum install "$pkg" -y
done
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2014/RHSA-2014:0148.sh
|
Shell
|
mit
| 2,710 |
#!/bin/bash
# Usage: CARD='/Volumes/MY CARD' sh storage-test.bash
# Stress-test removable storage: write random files, round-trip them through
# the card with rsync, then compare MD5 checksums.
set -o errexit -o pipefail -o nounset
# Tunables (environment overrides); unset afterwards so only the lowercase
# locals are referenced below.
num=${NUM:-20}
folder=${FOLDER:-test-files}
log_file=${LOG_FILE:-log}
card=${CARD%/}
unset NUM FOLDER LOG_FILE CARD
# Informational output in green on stdout.
print_message() {
  tput setaf 2
  printf '%s\n' "$*"
  tput sgr0
}
# Error output in red on stderr.
print_error() {
  tput setaf 1
  >&2 echo "$@"
  # 'sgr0' (reset attributes) is a single capability name; the original
  # 'tput sgr 0' invoked the nine-parameter 'sgr' capability with one
  # argument, which fails and leaves the terminal red.
  tput sgr0
}
old_cksum='old.md5'
new_cksum='new.md5'
# CARD was normalised into $card and then unset above; under 'set -o nounset'
# every later '$CARD' reference (as in the original) aborted with an
# unbound-variable error, so only $card is used from here on.
if [ -z "$card" ]; then
  print_error "Option <CARD> required."
  exit 1
fi
truncate -s 0 "$old_cksum" "$new_cksum" "$log_file"
mkdir -p "$folder"
if !( [ -d "$card" ] && [ -r "$card" ] && [ -w "$card" ] ); then
  print_error "<$card> is not a readable-writable directory."
  exit 1
fi
print_message "Saving test files to folder <$folder>..."
for i in $(seq "$num"); do
  filename="$folder/$i"
  # 10 MiB of random data, then sparse-extend to 1 GiB.
  # NOTE(review): 'bs=1m', 'truncate -s 1g' and 'md5' are BSD/macOS forms.
  dd if=/dev/urandom of="$filename" count=10 bs=1m 2>> "$log_file"
  truncate -s 1g "$filename"
  md5 "$filename" >> "$old_cksum"
done
print_message "Moving <$folder> to <$card>..."
rsync --ignore-times --progress --recursive "$folder" "$card"
print_message 'Moving back...'
rsync --ignore-times --progress --recursive "$card/$folder" .
print_message 'Checking MD5...'
for i in $(seq "$num"); do
  filename="$folder/$i"
  md5 "$filename" >> "$new_cksum"
done
if cmp --silent "$old_cksum" "$new_cksum"; then
  print_message "Congratulations! <$card> is fine."
else
  print_error 'MD5 does not match.'
fi
|
franklinyu/snippets
|
storage-test.bash
|
Shell
|
mit
| 1,387 |
#!/usr/bin/env bash
# Exit 0 when the current directory is inside a git work tree.
is_git_repository() {
  git branch &> /dev/null
}
# Print the current branch (the line 'git branch' marks with '*'),
# or nothing when not in a repository.
function git_branch_name() {
  # Delete lines not starting with '*', then strip the '* ' prefix.
  git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\1/';
}
# Export BRANCH with the current branch name, or leave it unset when
# not on a branch.
function set_git_branch() {
  unset BRANCH
  local branch
  branch=$(git_branch_name)
  # Quote the test: the original unquoted 'test $branch' errored out on the
  # multi-word "(HEAD detached at ...)" pseudo-branch.
  if [ -n "$branch" ]
  then
    BRANCH="${branch}"
  fi
}
# Print the number of changed/untracked paths from 'git status --porcelain';
# 'expr' normalises wc's (possibly space-padded) count to a bare integer.
function git_dirty() {
  expr `git status --porcelain 2>/dev/null| wc -l`
}
# Build the BRANCH prompt fragment: colour reflects tree state, an arrow the
# relation to the remote, '✗' a dirty tree; BRANCH is unset off-branch.
# Colour variables (EMG/EMY/EMR/EMC/RESET) are assumed to be defined by the
# surrounding theme -- TODO confirm.
# NOTE(review): matching English 'git status' phrases breaks under other
# locales or older/newer git wording.
function set_git_prompt() {
  git_status="$(git status 2> /dev/null)"
  # Green = clean, yellow = staged changes, red = anything else.
  if [[ ${git_status} =~ "working tree clean" ]]; then
    state="${EMG}"
  elif [[ ${git_status} =~ "Changes to be committed" ]]; then
    state="${EMY}"
  else
    state="${EMR}"
  fi
  # Set arrow icon based on status against remote
  remote=""
  if [[ ${git_status} =~ "Your branch is ahead of" ]]; then
    remote="↑"
  elif [[ ${git_status} =~ "Your branch is behind" ]]; then
    remote="↓"
  elif [[ ${git_status} =~ "have diverged" ]]; then
    remote="↕"
  fi
  # Get branch name
  set_git_branch
  branch=${BRANCH}
  if test ${BRANCH}; then
    # set the final branch string
    BRANCH="${state}git:(${branch}${remote})${RESET}"
    local git_dirty_prompt;
    # Append the dirty marker when git_dirty reports changed paths.
    if [[ `git_dirty` -eq 0 ]]; then
      git_dirty_prompt=""
    else
      git_dirty_prompt=" ${EMY}✗${RESET}"
    fi
    BRANCH=" ${BRANCH}${git_dirty_prompt}${RESET}"
  else
    unset BRANCH
  fi
}
# PROMPT_COMMAND hook: rebuild PS1 before each prompt.
# The λ marker is green when the previous command succeeded, red otherwise.
function update_prompt() {
  local EXIT="$?" # this has to be first
  # Provided elsewhere (virtualenv segment) -- sets VENV_PROMPT_MODIFIER.
  set_venv_mod_prompt_modifier
  if [ $EXIT -eq 0 ]
  then
    ret_status="${EMG}λ ${RESET}"
  else
    ret_status="${EMR}λ ${RESET}"
  fi
  # Only compute the git fragment inside a repository.
  if is_git_repository ; then
    set_git_prompt
  else
    BRANCH=""
  fi
  PS1="${ret_status}${VENV_PROMPT_MODIFIER}${EMC}\w${RESET}${BRANCH} "
}
# Rebuild the prompt before every command line.
PROMPT_COMMAND=update_prompt
# Set directory colors
export CLICOLOR=1
export LSCOLORS=GxFxCxDxBxegedabagaced
|
connorjacobsen/bourne
|
lib/appearance.sh
|
Shell
|
mit
| 1,819 |
#!/bin/bash
# Sum column 1 of stdin grouped by column 2 and print the top SIZE groups.
# SIZE defaults to 10: the original 'tail -$1' produced the invalid 'tail -'
# when no argument was given. 'tail -n' is the modern, portable spelling.
SIZE=${1:-10}
awk '{s[$2]+=$1}END{for(i in s){print s[i],i}}' | sort -n | tail -n "$SIZE"
|
andrewguy9/tt
|
naive_awk.sh
|
Shell
|
mit
| 94 |
# Fetch the core sources plus shared build configuration, then build.
# Fail fast: the original kept going after a failed mkdir/clone/mv.
set -e
mkdir -p build/src
# Cloning into an existing empty directory is allowed by git.
git clone https://github.com/AwesomeSauceMods/AwesomeSauceCore.git build/src
git clone https://github.com/AwesomeSauceMods/CoolInfoStuff.git
mv CoolInfoStuff/* build
cd build
# OpenAutomation support files are not needed for this build.
rm -r OpenAutomation
bash gradlew build
#
|
AwesomeSauceMods/AwesomeSauceCore
|
build.sh
|
Shell
|
mit
| 244 |
# Print the PHP version inside the running php70 construction-kit container.
docker exec anycontent-cms-construction-kit-php70 php -v
|
nhagemann/anycontent-cms-construction-kit-php
|
docker/php70/cmd/docker-phpversion.sh
|
Shell
|
mit
| 56 |
#!/bin/bash
# Entry point: dispatches backup/restore jobs, optionally via cron.
set -e
printf "\n\n\n\n"
echo "Starting Helicopterizer ..."
#Import helper.
# Provides validationGeneralEnvs, validationSpecificEnvs, printEnvs and
# removeSlashUri used below.
. /scripts/core/helper.sh
#Run Backup.
# Dispatch on the second argument (--tarball | --sync); 'exec' replaces this
# shell with the chosen backup script. Callers forward the script's full
# "$@", so $2 here matches the script's $2.
runBackup(){
  #Exec core backup.
  case $2 in
    --tarball)
      exec /scripts/core/tarball/backup.sh
      ;;
    --sync)
      exec /scripts/core/sync/backup.sh
      ;;
    *)
      echo "Error: Invalid Parameter, Use (--tarball or --sync)."
      exit 1
  esac
}
#Run Restore.
#Run Restore.
# Dispatch on the second argument (--tarball | --sync); 'exec' replaces this
# shell with the chosen restore script. Mirrors runBackup.
runRestore(){
  #Exec core restore.
  case $2 in
    --tarball)
      exec /scripts/core/tarball/restore.sh
      ;;
    --sync)
      exec /scripts/core/sync/restore.sh
      ;;
    *)
      echo "Error: Invalid Parameter, Use (--tarball or --sync)."
      exit 1
  esac
}
#Call Validation General Environment Variables.
validationGeneralEnvs
#Call Print Environment Variables.
printEnvs
#Remove slash in DATA_PATH URI.
# NOTE(review): $DATA_PATH is passed unquoted -- assumed whitespace-free.
DATA_PATH=$(removeSlashUri $DATA_PATH)
# Dispatch on the first CLI argument.
case $1 in
  backup | restore)
    # With CRON_SCHEDULE set, register this invocation with cron instead of
    # running it now.
    if [ "$CRON_SCHEDULE" ]; then
      #Call Validation Specific Backup Environment Variables.
      validationSpecificEnvs ${@}
      echo "Scheduling /scripts/run.sh ${@} with cron [CRON_SCHEDULE=$CRON_SCHEDULE]"
      #Set CRON_SCHEDULE=null to protect recursive scheduler.
      # The rescheduled run gets CRON_SCHEDULE='' so it executes directly
      # instead of re-registering itself; crond then runs in the foreground.
      echo -e "${CRON_SCHEDULE} CRON_SCHEDULE='' /scripts/run.sh ${@}" > /var/spool/cron/crontabs/root && crond -l 0 -f
      # NOTE(review): reached only after the foreground crond exits.
      echo "OK - Cron Job Scheduled!"
    elif [ "$1" = "backup" ]; then
      #Run Backup.
      runBackup ${@}
    elif [ "$1" = "restore" ]; then
      #Run Restore.
      runRestore ${@}
    fi
    ;;
  *)
    echo "Params: ${@}"
    echo "Error: Invalid Parameter, Use (backup or restore)."
    exit 1
esac
|
frekele/helicopterizer
|
scripts/run.sh
|
Shell
|
mit
| 1,750 |
#!/bin/sh
# iptables rules simulating censorship against $HOST, one technique per port:
# silent IP-level drop, HTTP-request drop, TCP reset on connect,
# TCP reset on HTTP request.
HOST=encore.noise.gatech.edu
IP_DROP_PORT=8888
HTTP_DROP_PORT=8889
TCP_RESET_IP_PORT=8890
TCP_RESET_URL_PORT=8891
# Append one INPUT rule per simulated censorship technique (see header).
# Expansions are quoted (SC2086); the values are the constants defined above.
start() {
  sudo iptables -A INPUT -p tcp -d "$HOST" --dport "$IP_DROP_PORT" -j DROP
  sudo iptables -A INPUT -p tcp -d "$HOST" --dport "$HTTP_DROP_PORT" -m string --string 'GET /' --algo bm -j DROP
  sudo iptables -A INPUT -p tcp -d "$HOST" --dport "$TCP_RESET_IP_PORT" -j REJECT --reject-with tcp-reset
  sudo iptables -A INPUT -p tcp -d "$HOST" --dport "$TCP_RESET_URL_PORT" -m string --string 'GET /' --algo bm -j REJECT --reject-with tcp-reset
}
# Delete the rules installed by start() (same match specs, -D instead of -A).
# Expansions are quoted (SC2086).
stop() {
  sudo iptables -D INPUT -p tcp -d "$HOST" --dport "$IP_DROP_PORT" -j DROP
  sudo iptables -D INPUT -p tcp -d "$HOST" --dport "$HTTP_DROP_PORT" -m string --string 'GET /' --algo bm -j DROP
  sudo iptables -D INPUT -p tcp -d "$HOST" --dport "$TCP_RESET_IP_PORT" -j REJECT --reject-with tcp-reset
  sudo iptables -D INPUT -p tcp -d "$HOST" --dport "$TCP_RESET_URL_PORT" -m string --string 'GET /' --algo bm -j REJECT --reject-with tcp-reset
}
# Dispatch: 'start' installs the rules, 'stop' removes them; any other
# argument (or none) is silently ignored.
case $1 in
  start)
    start
    ;;
  stop)
    stop
    ;;
esac
|
sburnett/web-censorship-testbed
|
setup_testbed.sh
|
Shell
|
mit
| 1,039 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2015:0216
#
# Security announcement date: 2015-02-11 20:49:41 UTC
# Script generation date: 2016-05-12 18:12:43 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - antlr-eap6.noarch:2.7.7-18.redhat_4.1.ep6.el5
# - apache-cxf.noarch:2.7.14-1.redhat_1.1.ep6.el5
# - glassfish-jsf-eap6.noarch:2.1.28-6.redhat_7.1.ep6.el5
# - guava-libraries.noarch:13.0.1-4.redhat_2.1.ep6.el5
# - hibernate4-core-eap6.noarch:4.2.17-2.SP1_redhat_1.1.ep6.el5
# - hibernate4-eap6.noarch:4.2.17-2.SP1_redhat_1.1.ep6.el5
# - hibernate4-entitymanager-eap6.noarch:4.2.17-2.SP1_redhat_1.1.ep6.el5
# - hibernate4-envers-eap6.noarch:4.2.17-2.SP1_redhat_1.1.ep6.el5
# - hibernate4-infinispan-eap6.noarch:4.2.17-2.SP1_redhat_1.1.ep6.el5
# - hornetq.noarch:2.3.21.2-1.Final_redhat_1.1.ep6.el5
# - httpserver.noarch:1.0.2-1.Final_redhat_1.1.ep6.el5
# - jboss-as-appclient.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-cli.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-client-all.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-clustering.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-cmp.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-configadmin.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-connector.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-console.noarch:2.2.12-1.Final_redhat_1.1.ep6.el5
# - jboss-as-controller.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-controller-client.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-core-security.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-deployment-repository.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-deployment-scanner.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-domain-http.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-domain-management.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-ee.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-ee-deployment.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-ejb3.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-embedded.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-host-controller.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-jacorb.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-jaxr.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-jaxrs.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-jdr.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-jmx.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-jpa.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-jsf.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-jsr77.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-logging.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-mail.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-management-client-content.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-messaging.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-modcluster.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-naming.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-network.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-osgi.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-osgi-configadmin.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-osgi-service.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-picketlink.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-platform-mbean.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-pojo.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-process-controller.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-protocol.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-remoting.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-sar.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-security.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-server.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-system-jmx.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-threads.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-transactions.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-version.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-web.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-webservices.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-weld.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-as-xts.noarch:7.4.3-3.Final_redhat_2.1.ep6.el5
# - jboss-ejb-client.noarch:1.0.28-1.Final_redhat_1.1.ep6.el5
# - jboss-hal.noarch:2.2.12-1.Final_redhat_1.1.ep6.el5
# - jboss-marshalling.noarch:1.4.10-1.Final_redhat_1.1.ep6.el5
# - jboss-modules.noarch:1.3.5-1.Final_redhat_1.1.ep6.el5
# - jboss-remoting3.noarch:3.3.4-1.Final_redhat_1.1.ep6.el5
# - jboss-security-negotiation.noarch:2.3.6-1.Final_redhat_1.1.ep6.el5
# - jbossas-appclient.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossas-bundles.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossas-core.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossas-domain.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossas-javadocs.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossas-modules-eap.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossas-product-eap.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossas-standalone.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossas-welcome-content-eap.noarch:7.4.3-2.Final_redhat_2.1.ep6.el5
# - jbossts.noarch:4.17.26-1.Final_redhat_1.1.ep6.el5
# - jbossweb.noarch:7.4.10-1.Final_redhat_1.1.ep6.el5
# - jbossws-cxf.noarch:4.3.4-1.Final_redhat_1.1.ep6.el5
# - jbossws-spi.noarch:2.3.1-1.Final_redhat_1.1.ep6.el5
# - picketbox.noarch:4.0.19-10.SP10_redhat_1.1.ep6.el5
# - picketlink-bindings.noarch:2.5.3-15.SP16_redhat_1.1.ep6.el5
# - picketlink-federation.noarch:2.5.3-16.SP16_redhat_1.1.ep6.el5
# - resteasy.noarch:2.3.8-13.SP4_redhat_2.1.ep6.el5
# - sun-istack-commons.noarch:2.6.1-12.redhat_3.1.ep6.el5
# - sun-saaj-1.3-impl.noarch:1.3.16-11.SP1_redhat_2.1.ep6.el5
# - weld-core.noarch:1.1.28-1.Final_redhat_1.1.ep6.el5
# - wss4j.noarch:1.6.17-2.SP1_redhat_1.1.ep6.el5
#
# Last versions recommended by security team:
# - antlr-eap6.noarch:2.7.7-18.redhat_4.1.ep6.el5
# - apache-cxf.noarch:2.7.17-1.redhat_1.1.ep6.el5
# - glassfish-jsf-eap6.noarch:2.1.28-9.redhat_10.1.ep6.el5
# - guava-libraries.noarch:13.0.1-4.redhat_2.1.ep6.el5
# - hibernate4-core-eap6.noarch:4.2.21-1.Final_redhat_1.1.ep6.el5
# - hibernate4-eap6.noarch:4.2.21-1.Final_redhat_1.1.ep6.el5
# - hibernate4-entitymanager-eap6.noarch:4.2.21-1.Final_redhat_1.1.ep6.el5
# - hibernate4-envers-eap6.noarch:4.2.21-1.Final_redhat_1.1.ep6.el5
# - hibernate4-infinispan-eap6.noarch:4.2.21-1.Final_redhat_1.1.ep6.el5
# - hornetq.noarch:2.3.25-7.SP6_redhat_1.1.ep6.el5
# - httpserver.noarch:1.0.5-1.Final_redhat_1.1.ep6.el5
# - jboss-as-appclient.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-cli.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-client-all.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-clustering.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-cmp.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-configadmin.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-connector.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-console.noarch:2.5.11-1.Final_redhat_1.1.ep6.el5
# - jboss-as-controller.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-controller-client.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-core-security.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-deployment-repository.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-deployment-scanner.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-domain-http.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-domain-management.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-ee.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-ee-deployment.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-ejb3.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-embedded.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-host-controller.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-jacorb.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-jaxr.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-jaxrs.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-jdr.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-jmx.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-jpa.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-jsf.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-jsr77.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-logging.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-mail.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-management-client-content.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-messaging.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-modcluster.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-naming.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-network.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-osgi.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-osgi-configadmin.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-osgi-service.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-picketlink.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-platform-mbean.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-pojo.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-process-controller.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-protocol.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-remoting.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-sar.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-security.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-server.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-system-jmx.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-threads.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-transactions.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-version.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-web.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-webservices.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-weld.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-as-xts.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jboss-ejb-client.noarch:1.0.32-1.Final_redhat_1.1.ep6.el5
# - jboss-hal.noarch:2.5.11-1.Final_redhat_1.1.ep6.el5
# - jboss-marshalling.noarch:1.4.10-1.Final_redhat_1.1.ep6.el5
# - jboss-modules.noarch:1.3.7-1.Final_redhat_1.1.ep6.el5
# - jboss-remoting3.noarch:3.3.6-1.Final_redhat_1.1.ep6.el5
# - jboss-security-negotiation.noarch:2.3.10-1.Final_redhat_1.1.ep6.el5
# - jbossas-appclient.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jbossas-bundles.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jbossas-core.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jbossas-domain.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jbossas-javadocs.noarch:7.5.5-3.Final_redhat_3.1.ep6.el5
# - jbossas-modules-eap.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jbossas-product-eap.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jbossas-standalone.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jbossas-welcome-content-eap.noarch:7.5.5-2.Final_redhat_3.1.ep6.el5
# - jbossts.noarch:4.17.30-1.Final_redhat_1.1.ep6.el5
# - jbossweb.noarch:7.5.12-1.Final_redhat_1.1.ep6.el5
# - jbossws-cxf.noarch:4.3.5-4.Final_redhat_3.1.ep6.el5
# - jbossws-spi.noarch:2.3.1-1.Final_redhat_1.1.ep6.el5
# - picketbox.noarch:4.1.2-1.Final_redhat_1.1.ep6.el5
# - picketlink-bindings.noarch:2.5.4-8.SP7_redhat_1.1.ep6.el5
# - picketlink-federation.noarch:2.5.4-8.SP7_redhat_1.1.ep6.el5
# - resteasy.noarch:2.3.12-1.Final_redhat_1.1.ep6.el5
# - sun-istack-commons.noarch:2.6.1-12.redhat_3.1.ep6.el5
# - sun-saaj-1.3-impl.noarch:1.3.16-11.SP1_redhat_2.1.ep6.el5
# - weld-core.noarch:1.1.31-1.Final_redhat_1.1.ep6.el5
# - wss4j.noarch:1.6.17-2.SP1_redhat_1.1.ep6.el5
#
# CVE List:
# - CVE-2014-7827
# - CVE-2014-7839
# - CVE-2014-7849
# - CVE-2014-7853
# - CVE-2014-8122
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Install the fixed package versions listed in the header above.
# A single POSIX-compatible loop replaces 93 copy-pasted install lines
# (no bash arrays: this script runs under /bin/sh).
for pkg in \
  antlr-eap6.noarch-2.7.7 \
  apache-cxf.noarch-2.7.17 \
  glassfish-jsf-eap6.noarch-2.1.28 \
  guava-libraries.noarch-13.0.1 \
  hibernate4-core-eap6.noarch-4.2.21 \
  hibernate4-eap6.noarch-4.2.21 \
  hibernate4-entitymanager-eap6.noarch-4.2.21 \
  hibernate4-envers-eap6.noarch-4.2.21 \
  hibernate4-infinispan-eap6.noarch-4.2.21 \
  hornetq.noarch-2.3.25 \
  httpserver.noarch-1.0.5 \
  jboss-as-appclient.noarch-7.5.5 \
  jboss-as-cli.noarch-7.5.5 \
  jboss-as-client-all.noarch-7.5.5 \
  jboss-as-clustering.noarch-7.5.5 \
  jboss-as-cmp.noarch-7.5.5 \
  jboss-as-configadmin.noarch-7.5.5 \
  jboss-as-connector.noarch-7.5.5 \
  jboss-as-console.noarch-2.5.11 \
  jboss-as-controller.noarch-7.5.5 \
  jboss-as-controller-client.noarch-7.5.5 \
  jboss-as-core-security.noarch-7.5.5 \
  jboss-as-deployment-repository.noarch-7.5.5 \
  jboss-as-deployment-scanner.noarch-7.5.5 \
  jboss-as-domain-http.noarch-7.5.5 \
  jboss-as-domain-management.noarch-7.5.5 \
  jboss-as-ee.noarch-7.5.5 \
  jboss-as-ee-deployment.noarch-7.5.5 \
  jboss-as-ejb3.noarch-7.5.5 \
  jboss-as-embedded.noarch-7.5.5 \
  jboss-as-host-controller.noarch-7.5.5 \
  jboss-as-jacorb.noarch-7.5.5 \
  jboss-as-jaxr.noarch-7.5.5 \
  jboss-as-jaxrs.noarch-7.5.5 \
  jboss-as-jdr.noarch-7.5.5 \
  jboss-as-jmx.noarch-7.5.5 \
  jboss-as-jpa.noarch-7.5.5 \
  jboss-as-jsf.noarch-7.5.5 \
  jboss-as-jsr77.noarch-7.5.5 \
  jboss-as-logging.noarch-7.5.5 \
  jboss-as-mail.noarch-7.5.5 \
  jboss-as-management-client-content.noarch-7.5.5 \
  jboss-as-messaging.noarch-7.5.5 \
  jboss-as-modcluster.noarch-7.5.5 \
  jboss-as-naming.noarch-7.5.5 \
  jboss-as-network.noarch-7.5.5 \
  jboss-as-osgi.noarch-7.5.5 \
  jboss-as-osgi-configadmin.noarch-7.5.5 \
  jboss-as-osgi-service.noarch-7.5.5 \
  jboss-as-picketlink.noarch-7.5.5 \
  jboss-as-platform-mbean.noarch-7.5.5 \
  jboss-as-pojo.noarch-7.5.5 \
  jboss-as-process-controller.noarch-7.5.5 \
  jboss-as-protocol.noarch-7.5.5 \
  jboss-as-remoting.noarch-7.5.5 \
  jboss-as-sar.noarch-7.5.5 \
  jboss-as-security.noarch-7.5.5 \
  jboss-as-server.noarch-7.5.5 \
  jboss-as-system-jmx.noarch-7.5.5 \
  jboss-as-threads.noarch-7.5.5 \
  jboss-as-transactions.noarch-7.5.5 \
  jboss-as-version.noarch-7.5.5 \
  jboss-as-web.noarch-7.5.5 \
  jboss-as-webservices.noarch-7.5.5 \
  jboss-as-weld.noarch-7.5.5 \
  jboss-as-xts.noarch-7.5.5 \
  jboss-ejb-client.noarch-1.0.32 \
  jboss-hal.noarch-2.5.11 \
  jboss-marshalling.noarch-1.4.10 \
  jboss-modules.noarch-1.3.7 \
  jboss-remoting3.noarch-3.3.6 \
  jboss-security-negotiation.noarch-2.3.10 \
  jbossas-appclient.noarch-7.5.5 \
  jbossas-bundles.noarch-7.5.5 \
  jbossas-core.noarch-7.5.5 \
  jbossas-domain.noarch-7.5.5 \
  jbossas-javadocs.noarch-7.5.5 \
  jbossas-modules-eap.noarch-7.5.5 \
  jbossas-product-eap.noarch-7.5.5 \
  jbossas-standalone.noarch-7.5.5 \
  jbossas-welcome-content-eap.noarch-7.5.5 \
  jbossts.noarch-4.17.30 \
  jbossweb.noarch-7.5.12 \
  jbossws-cxf.noarch-4.3.5 \
  jbossws-spi.noarch-2.3.1 \
  picketbox.noarch-4.1.2 \
  picketlink-bindings.noarch-2.5.4 \
  picketlink-federation.noarch-2.5.4 \
  resteasy.noarch-2.3.12 \
  sun-istack-commons.noarch-2.6.1 \
  sun-saaj-1.3-impl.noarch-1.3.16 \
  weld-core.noarch-1.1.31 \
  wss4j.noarch-1.6.17
do
  sudo yum install "$pkg" -y
done
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2015/RHSA-2015:0216.sh
|
Shell
|
mit
| 17,209 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DLA-634-1
#
# Security announcement date: 2016-09-23 00:00:00 UTC
# Script generation date: 2017-01-01 21:09:20 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - dropbear:2012.55-1.3+deb7u1
#
# Last versions recommended by security team:
# - dropbear:2012.55-1.3+deb7u1
#
# CVE List:
# - CVE-2016-7406
# - CVE-2016-7407
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade dropbear to the fixed Wheezy LTS version; --only-upgrade leaves the
# package alone if it is not already installed.
sudo apt-get install --only-upgrade dropbear=2012.55-1.3+deb7u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/i386/2016/DLA-634-1.sh
|
Shell
|
mit
| 646 |
#!/bin/bash
# Generate Go gRPC stubs from the .proto files under kits/cli.
# '-e' moved from the shebang into 'set' so it also applies when the script
# is invoked as 'bash update_proto.sh'; expansions quoted (the glob stays
# outside the quotes so it still expands).
set -e
dir=kits/cli
protoc -I "${dir}" "${dir}"/*.proto --go_out=plugins=grpc:"${dir}"/proto
echo >&2 "update protos OK"
|
yulefox/lamp
|
update_proto.sh
|
Shell
|
mit
| 125 |
#!/bin/bash
# Regenerate ${patch_file} (a global set by the caller) against the current
# .dts sources: apply it, re-diff the tree, then un-apply the refreshed diff.
rebase () {
  patch -p2 < ./${patch_file}
  git diff *.dts > ./${patch_file}
  patch -p2 -R < ./${patch_file}
}
# Like rebase, but applies ${patch_dep_one} first as a prerequisite.
# NOTE(review): patch_dep_one is never assigned in this script, and the
# prerequisite patch is not reverted afterwards -- verify before use.
rebase_one_dep () {
  patch -p2 < ./${patch_dep_one}
  patch -p2 < ./${patch_file}
  git diff *.dts > ./${patch_file}
  patch -p2 -R < ./${patch_file}
}
# Rebase each simple patch in order. The loop variable IS the global
# patch_file that rebase() reads, replacing eight copy-pasted
# assignment/call pairs; order matches the original exactly.
for patch_file in \
  simple/disable-ttyO1.diff \
  simple/enable-ttyO2.diff \
  simple/enable-ttyO4.diff \
  simple/enable-ttyO5.diff \
  simple/enable-i2c-1.diff \
  simple/enable-i2c-1-alt-pins.diff \
  simple/enable-i2c-1-400kHz.diff \
  simple/enable-i2c-1-alt-pins-400kHz.diff
do
  rebase
done
|
RobertCNelson/rscm
|
3.13-bone/rebase.sh
|
Shell
|
mit
| 667 |
# Ordered list of local-software install scripts for this platform,
# accumulated into the PKGS array consumed by the surrounding build tooling.
# First, install OpenSSL
# This is also important for python
PKGS+=("install_openssl.sh")
PKGS+=("install_anaconda.sh")
PKGS+=("install_scons3.sh")
#PKGS+=("install_gcc7.1.sh")
PKGS+=("install_fftw3.sh")
PKGS+=("install_shtns.sh")
#PKGS+=("install_eigen3.sh")
PKGS+=("install_lapack.sh")
#PKGS+=("install_shtns_python.sh")
# GIT support
# + OpenSSL was updated before
# + Next, CURL is updated
PKGS+=("install_curl.sh")
# + Leave existing 'git' untouched, makes problems on this server
# The existing git works with the new curl libraries
|
schreiberx/sweet
|
mule_local/platforms/50_coolmuc_mpp2_gnu/local_software_default.sh
|
Shell
|
mit
| 558 |
#!/bin/bash
# Remove temporary build artifacts from a previous run, if present.
clean () {
    local artifact
    for artifact in "/tmp/projects.html" "/tmp/projects.xml"
    do
        if [ -f "$artifact" ]
        then
            rm "$artifact"
        fi
    done
}
# Stamp each project file's mtime from a YYYY-MM-DD date found in its
# contents, so a later `ls --sort=time` orders projects by that date.
sync_files_datetime () {
cd "projects"
for file in *
do
# NOTE(review): grep emits every line containing a date-like pattern; a file
# with several such lines yields a multi-line $date and a failing touch —
# verify project files contain exactly one date line.
date=$(cat "$file" | grep "[0-9]*-[0-9]*-[0-9]*" | sed "s/-//g")
# touch -t wants [CC]YYMMDDhhmm — append midnight.
date="$date""0000"
touch "$file" -t "$date"
done
cd ..
}
# Render every file in projects/ (oldest-sorted) through project.template and
# item-rss.template, appending fragments to /tmp/projects.html and
# /tmp/projects.xml.  The loop's "+" markers feed pv for a progress bar; the
# pipe means the loop body runs in a subshell, so only the file output matters.
build_projects_to_html () {
local template=$(cat project.template)
local rss=$(cat item-rss.template)
local project=""
local html=""
cd "projects"
for file in $(ls --sort=time)
do
# Per-file layout: line 1 = title, line 2 = video URL, lines 4+ = description.
local lines=()
local index=0
local description=""
# NOTE(review): `read` without -r mangles backslashes in project text — confirm
# none of the source files rely on literal backslashes.
while read line
do
lines[$index]="$line"
index=$(($index+1))
if [ 3 -lt $index ]
then
description="$description$line<br/>"
fi
done < "$file"
title=${lines[0]}
video=${lines[1]}
link="$file"
# Substitute placeholders (TITLE/VIDEO/DESCRIPTION/LINK) in both templates.
project="$template"
projectRss="$rss"
project="${project//TITLE/$title}"
projectRss="${projectRss//TITLE/$title}"
project="${project//VIDEO/$video}"
projectRss="${projectRss//VIDEO/$video}"
project="${project//DESCRIPTION/$description}"
projectRss="${projectRss//DESCRIPTION/$description}"
project="${project//LINK/$link}"
projectRss="${projectRss//LINK/$link}"
echo "$project" >> /tmp/projects.html
echo "$projectRss" >> /tmp/projects.xml
echo -n "+" #marker for pv (progress bar)
done | pv -s $(ls | wc -l) > /dev/null
cd ..
}
# Assemble index.html by dropping the rendered project fragments into the
# PROJECTS placeholder of index.template.
build_index_html () {
    local page
    page=$(cat index.template)
    local fragments
    fragments=$(cat /tmp/projects.html)
    echo "${page//PROJECTS/$fragments}" > index.html
}
# Assemble rss.xml by dropping the rendered RSS items into the PROJECTS
# placeholder of rss.template.
build_rss_xml () {
    local skeleton
    skeleton=$(cat rss.template)
    local entries
    entries=$(cat /tmp/projects.xml)
    echo "${skeleton//PROJECTS/$entries}" > rss.xml
}
# Build pipeline: wipe temp output, stamp files with their dates, render
# per-project fragments, assemble the final HTML/RSS, and clean up again.
main () {
clean
sync_files_datetime
build_projects_to_html
build_index_html
build_rss_xml
clean
}
# `time` reports how long the whole build took.
time main
|
dimhold/cool-projects.com
|
build.sh
|
Shell
|
mit
| 1,811 |
#!/bin/bash
# Configuring NodeJS (and NPM packages)
# Note: this installs `npm` too.
os=$(uname -s)
if [ "$os" = "Linux" ] ; then
  sudo apt-get remove --purge node # unrelated package
  # https://github.com/nodesource/distributions
  curl -sL https://deb.nodesource.com/setup_8.x | sudo bash -
  sudo apt-get install -y nodejs # this package includes npm too
  sudo apt-get install -y build-essential
elif [ "$os" = "Darwin" ] ; then
  ! brew ls --versions node >/dev/null 2>&1 && brew install node
  brew link --overwrite node
else
  echo "Unsupported OS: $os" >&2
  # `return` only works in a sourced script; when executed, bash prints an
  # error and falls through to the npm installs below.  `exit 1` stops reliably.
  exit 1
fi
# Packages
# Global CLI tooling; each line's trailing comment is the package's purpose.
npm install -g bower # The browser package manager
npm install -g coffee-script # Unfancy JavaScript
npm install -g diff2html-cli # Fast Diff to colorized HTML
npm install -g eslint # An AST-based pattern checker for JavaScript
npm install -g express # Fast, unopinionated, minimalist web framework
npm install -g express-generator # Express application generator
npm install -g generator-angular # Yeoman generator for AngularJS
npm install -g generator-webapp # Scaffold out a front-end web app
npm install -g grunt-cli # The grunt command line interface
npm install -g gulp # The streaming build system
npm install -g http-server # A simple zero-configuration command-line http server
npm install -g jshint # Static analysis tool for JavaScript
npm install -g less # Leaner CSS
npm install -g n # Interactively Manage All Your Node Versions
npm install -g nodemon # Simple monitor script for use during development of a node.js app
npm install -g typescript # TypeScript is a language for application scale JavaScript development
npm install -g yo # CLI tool for running Yeoman generators
# Check the integrity of the local npm cache (npm 5+).
npm cache verify
|
jivkok/dotfiles
|
configure_nodejs.sh
|
Shell
|
mit
| 1,718 |
# Environment for the rethink-weekly app; source this before starting it.
# "..." / "xxx" are placeholders — substitute real values, never commit secrets.
export SENTRY_ENDPOINT="..."
export SITE_URL="127.0.0.1:3000"
export LISTEN="127.0.0.1:3000"
export RETHINK_HOST="127.0.0.1:28015"
export RETHINK_DB="rewl"
export MAIL_FROM="xxx"
export MAILGUN_API="..."
|
axcoto/rethink-weekly
|
env.sh
|
Shell
|
mit
| 204 |
####################
# functions
####################
# print available colors and their numbers
function colours() {
    local code
    for code in {0..255}; do
        printf '\x1b[38;5;%sm colour%s' "$code" "$code"
        # newline after every fifth colour, tab separator otherwise
        if (( code % 5 == 0 )); then
            printf '\n'
        else
            printf '\t'
        fi
    done
}
# Create a new directory and enter it
function md() {
    # `cd -- "$1"`: the original's `cd "$@"` handed *all* arguments to cd,
    # which with two args triggers bash's `cd old new` substitution mode
    # instead of entering the first directory created.
    mkdir -p -- "$@" && cd -- "$1"
}
# Show the ten most-used commands: count each history entry's command word
# ($2), print "count command", sort numerically descending, keep the top 10.
function hist() {
history | awk '{a[$2]++}END{for(i in a){print a[i] " " i}}' | sort -rn | head
}
# find shorthand
function f() {
    local pattern="$1"
    find . -name "$pattern"
}
# Stop the Homebrew-managed nginx service (macOS launchctl).
function ng-stop() {
sudo launchctl stop homebrew.mxcl.nginx
}
# Start the Homebrew-managed nginx service.
function ng-start() {
sudo launchctl start homebrew.mxcl.nginx
}
# Restart = stop + start.  The original only issued `start`, which is a
# no-op while nginx is already running, so "restart" never restarted it.
function ng-restart() {
sudo launchctl stop homebrew.mxcl.nginx
sudo launchctl start homebrew.mxcl.nginx
}
# Bounce the Homebrew-managed dnsmasq service (macOS launchctl).
function dns-restart() {
sudo launchctl stop homebrew.mxcl.dnsmasq
sudo launchctl start homebrew.mxcl.dnsmasq
}
# Start an HTTP server from a directory, optionally specifying the port
# ($1, default 8000).  Opens the browser first; the server call then blocks.
# Uses Python 2's SimpleHTTPServer — TODO confirm python2 is still available.
function server() {
local port="${1:-8000}"
open "http://localhost:${port}/"
# Set the default Content-Type to `text/plain` instead of `application/octet-stream`
# And serve everything as UTF-8 (although not technically correct, this doesn’t break anything for binary files)
python -c $'import SimpleHTTPServer;\nmap = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map;\nmap[""] = "text/plain";\nfor key, value in map.items():\n\tmap[key] = value + ";charset=UTF-8";\nSimpleHTTPServer.test();' "$port"
}
# take this repo and copy it to somewhere else minus the .git stuff.
function gitexport(){
    local target="$1"
    mkdir -p "$target"
    git archive master | tar -x -C "$target"
}
# get gzipped size: print the raw byte count of $1 followed by its
# gzip-compressed byte count.
function gz() {
    echo "orig size (bytes): "
    # redirect instead of the original's useless `cat "$1" | wc -c`
    wc -c < "$1"
    echo "gzipped size (bytes): "
    gzip -c "$1" | wc -c
}
# All the dig info
# Query all record types for $1, showing only the answer section.
function digga() {
dig +nocmd "$1" any +multiline +noall +answer
}
# Escape UTF-8 characters into their 3-byte format
# Each byte of $@ becomes a \xNN escape via xxd's one-byte-per-line dump.
function escape() {
printf "\\\x%s" $(printf "$@" | xxd -p -c1 -u)
echo # newline
}
# Decode \x{ABCD}-style Unicode escape sequences
# NOTE(review): "$@" is interpolated into the perl program, so shell/perl
# metacharacters in the argument are evaluated — use only on trusted input.
function unidecode() {
perl -e "binmode(STDOUT, ':utf8'); print \"$@\""
echo # newline
}
# Extract archives - use: extract <file>
# Credits to http://dotfiles.org/~pseup/.bashrc
function extract() {
    # "$1" is quoted throughout: the original left it bare, so archive
    # names containing spaces or glob characters broke every branch.
    if [ -f "$1" ] ; then
        case "$1" in
            *.tar.bz2) tar xjf "$1" ;;
            *.tar.gz) tar xzf "$1" ;;
            *.bz2) bunzip2 "$1" ;;
            *.rar) rar x "$1" ;;
            *.gz) gunzip "$1" ;;
            *.tar) tar xf "$1" ;;
            *.tbz2) tar xjf "$1" ;;
            *.tgz) tar xzf "$1" ;;
            *.zip) unzip "$1" ;;
            *.Z) uncompress "$1" ;;
            *.7z) 7z x "$1" ;;
            *) echo "'$1' cannot be extracted via extract()" ;;
        esac
    else
        echo "'$1' is not a valid file"
    fi
}
# syntax highlight the contents of a file or the clipboard and place the result on the clipboard
# $1 = syntax name, $2 = optional highlight style (default moria), $3 = optional file
function hl() {
    if [ -z "$3" ]; then
        src=$( pbpaste )
    else
        src=$( cat "$3" )
    fi
    if [ -z "$2" ]; then
        style="moria"
    else
        style="$2"
    fi
    # printf keeps the text intact (the original's unquoted `echo $src`
    # collapsed newlines/whitespace); "Inconsolata" fixes the misspelled
    # font name "Inconsoloata".
    printf '%s\n' "$src" | highlight -O rtf --syntax "$1" --font Inconsolata --style "$style" --line-number --font-size 24 | pbcopy
}
# Increment the patch component of the quoted version string in metadata.rb
# (e.g. '1.2.3' -> '1.2.4'), printing the version line before and after.
function bump () {
file=metadata.rb
grep -w "version" $file
# x = version string with surrounding single quotes stripped
x=$(grep -w "version" $file | grep -o "'.*'")
x="${x%\'}"
x="${x#\'}"
# y = trailing numeric (patch) component; old = everything before it
y=$(echo "$x" | grep -o "[0-9]*$")
old="${x%.*}"
dot=.
new=$(($y+1))
a=$x
b=$old$dot$new
# NOTE(review): `sed -i ''` is the BSD/macOS form; GNU sed needs `sed -i`.
sed -i '' "s/'$a'/'$b'/g" $file
grep -w "version" $file
}
|
hacker1db/Dotfiles
|
zsh/functions.zsh
|
Shell
|
mit
| 3,517 |
#!/bin/sh
clear
echo "
 _____ _ _ _ _
| |___ ___ ___| |_ ___ _| | | |_ _ _ |_|
| --| _| -_| .'| _| -_| . | | . | | | _
|_____|_| |___|__,|_| |___|___| |___|_ | |_|
 |___|
 _____ _ _ _ _ _____ __ _____
| | |_ ___|_|___| |_ ___ ___| |_ ___ ___ | |__| | | __|___ ___ _ _
| --| | _| |_ -| _| . | . | | -_| _| | | | | | | | | | _| .'| | |
|_____|_|_|_| |_|___|_| |___| _|_|_|___|_| |_|_|_|_____| |_____|_| |__,|_ |
 |_| |___|
Version: 1.2.6
Last Updated: 11/4/2021
"
#--------------------------------------------------------------------------------------------
sudo add-apt-repository -y ppa:longsleep/golang-backports
wait
sudo -E apt-get update
wait
sudo -E apt-get install -y golang-go
#sudo -E apt-get install -y golang-go.tools
wait
# ASCII quotes here: the original used typographic quotes (‘ ” ’), which
# appended a garbage line to ~/.profile and never actually extended PATH.
echo 'PATH="/usr/lib/go-1.17/bin:$PATH"' >> ~/.profile
# `.` instead of `source`: this script runs under /bin/sh, where `source`
# is not guaranteed to exist (dash lacks it).
. ~/.profile
wait
echo -e "\r\n \r\n "
go version
echo -e "\r\n \r\n "
go env
#-----------------------------------
cd ~
# The original `if` lacked its `then`, which is a fatal syntax error.
if [ ! -d "go" ]; then
mkdir go
fi
cd go/
#-----------------------------------
# --- Make Sample File ---
# A quoted heredoc keeps \n literal in the Go source (sh's `echo` may expand
# it), and `>` truncates so reruns don't append duplicate code.
cat > helloworld.go <<'EOF'
package main
import "fmt"
func main() {
    fmt.Printf("hello, world\n")
}
EOF
#---------------------------------
chmod u+x helloworld.go
go run helloworld.go
#---------------------------------
echo " \r\n \r\n Done! \r\n \r\n"
|
c2theg/srvBuilds
|
install_golang.sh
|
Shell
|
mit
| 1,893 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2008:0569
#
# Security announcement date: 2008-07-06 14:53:47 UTC
# Script generation date: 2017-01-27 21:18:01 UTC
#
# Operating System: CentOS 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - devhelp.i386:0.12-17.el5
# - devhelp-devel.i386:0.12-17.el5
# - firefox.i386:3.0-2.el5.centos
# - xulrunner.i386:1.9-1.el5
# - xulrunner-devel.i386:1.9-1.el5
# - devhelp.x86_64:0.12-17.el5
# - devhelp-devel.x86_64:0.12-17.el5
# - firefox.x86_64:3.0-2.el5.centos
# - xulrunner.x86_64:1.9-1.el5
# - xulrunner-devel.x86_64:1.9-1.el5
# - xulrunner-devel-unstable.x86_64:1.9-1.el5
# - yelp.x86_64:2.16.0-19.el5
#
# Last versions recommanded by security team:
# - devhelp.i386:0.12-23.el5_9
# - devhelp-devel.i386:0.12-23.el5_9
# - firefox.i386:45.7.0-1.el5.centos
# - xulrunner.i386:17.0.10-1.el5_10
# - xulrunner-devel.i386:17.0.10-1.el5_10
# - devhelp.x86_64:0.12-23.el5_9
# - devhelp-devel.x86_64:0.12-23.el5_9
# - firefox.x86_64:45.7.0-1.el5.centos
# - xulrunner.x86_64:17.0.10-1.el5_10
# - xulrunner-devel.x86_64:17.0.10-1.el5_10
# - xulrunner-devel-unstable.x86_64:1.9.0.18-1.el5_4
# - yelp.x86_64:2.16.0-30.el5_9
#
# CVE List:
# - CVE-2008-2798
# - CVE-2008-2799
# - CVE-2008-2800
# - CVE-2008-2801
# - CVE-2008-2802
# - CVE-2008-2803
# - CVE-2008-2805
# - CVE-2008-2807
# - CVE-2008-2808
# - CVE-2008-2809
# - CVE-2008-2810
# - CVE-2008-2811
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Install the recommended fixed packages listed in the header above.
# NOTE(review): `name.arch-version` is not standard yum argument syntax
# (yum expects `name-version` or `name.arch`); these generated commands may
# simply install the latest available package — verify on a target host.
sudo yum install devhelp.i386-0.12 -y
sudo yum install devhelp-devel.i386-0.12 -y
sudo yum install firefox.i386-45.7.0 -y
sudo yum install xulrunner.i386-17.0.10 -y
sudo yum install xulrunner-devel.i386-17.0.10 -y
sudo yum install devhelp.x86_64-0.12 -y
sudo yum install devhelp-devel.x86_64-0.12 -y
sudo yum install firefox.x86_64-45.7.0 -y
sudo yum install xulrunner.x86_64-17.0.10 -y
sudo yum install xulrunner-devel.x86_64-17.0.10 -y
sudo yum install xulrunner-devel-unstable.x86_64-1.9.0.18 -y
sudo yum install yelp.x86_64-2.16.0 -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_5/x86_64/2008/CESA-2008:0569.sh
|
Shell
|
mit
| 2,178 |
#!/usr/bin/env bash
echo "Configuring all..."
# Run each setup stage in order; all stages live under ~/dotfiles/setup.
for stage in \
    create_symlinks \
    setup_prefs \
    install_brew \
    install_casks \
    install_packages \
    install_mas_apps \
    install_fonts \
    set_default_apps \
    setup_tm
do
    ~/dotfiles/setup/"$stage".sh
done
|
caleb531/dotfiles
|
setup/setup_all.sh
|
Shell
|
mit
| 357 |
#! /usr/bin/env bash
# Run a ZEO database server from the directory configured in the package's
# Python settings (ZCONF_PATH), so runzeo picks up zeo.conf from the cwd.
cd `python -c "import edeposit.amqp.storage.settings as s; print s.ZCONF_PATH"`
# supervisord can't stop the script, this should fix it
# NOTE(review): pkill's signal usually precedes the pattern
# (`pkill -SIGINT runzeo`); procps permutes options so this order works
# there — confirm on the target platform.
trap "{ pkill runzeo -SIGINT; exit 0; }" EXIT
runzeo -C zeo.conf
|
edeposit/edeposit.amqp.storage
|
bin/edeposit_storage_runzeo.sh
|
Shell
|
mit
| 225 |
#!/bin/bash
# Restore the container user's ~/files from the host-mounted copy, then fix
# ownership.  NOTE(review): the original read DWL_USER_NAME in the first two
# tests but DWLC_USER_NAME everywhere else; unified to DWLC_USER_NAME —
# confirm against the image's environment variables.
if [ ! -d /dwl/home/host ]; then
    sudo mkdir -p /dwl/home/host;
fi
if [ -d /dwl/home/host/files ]; then
    # replace any stale regular file or directory before copying
    if [ -f /home/${DWLC_USER_NAME}/files ]; then
        sudo rm -f /home/${DWLC_USER_NAME}/files;
    fi
    if [ -d /home/${DWLC_USER_NAME}/files ]; then
        sudo rm -rdf /home/${DWLC_USER_NAME}/files;
    fi
    sudo cp -rdf /dwl/home/host/files /home/${DWLC_USER_NAME};
fi
sudo chown -R ${DWLC_USER_NAME}:${DWLC_USER_NAME} /home/${DWLC_USER_NAME}/;
# sudo chown root:root /home/${DWLC_USER_NAME};
|
davask/d-ubuntu
|
build/dwl/permission.sh
|
Shell
|
mit
| 525 |
#!/bin/bash
# Launch the Spigot Minecraft server: G1 GC with a 500 ms pause-time target,
# 7 parallel GC threads, 2 GB initial / 5 GB max heap, SSE3 code generation.
java -server -XX:+UseG1GC -XX:MaxGCPauseMillis=500 -XX:ParallelGCThreads=7 -Xmx5G -Xms2G -XX:UseSSE=3 -jar /home/minecraft/spigot.jar
|
ttk2/DockerBootstrapping
|
spigot.sh
|
Shell
|
mit
| 147 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2628-1
#
# Security announcement date: 2015-06-08 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:36 UTC
#
# Operating System: Ubuntu 14.10
# Architecture: i686
#
# Vulnerable packages fix on version:
# - strongswan-ike:5.1.2-0ubuntu3.3
#
# Last versions recommanded by security team:
# - strongswan-ike:5.1.2-0ubuntu3.3
#
# CVE List:
# - CVE-2015-4171
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade strongswan-ike to the fixed Ubuntu 14.10 version listed above.
sudo apt-get install --only-upgrade strongswan-ike=5.1.2-0ubuntu3.3 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.10/i686/2015/USN-2628-1.sh
|
Shell
|
mit
| 634 |
# The Brewfile handles Homebrew-based app and library installs, but there may
# still be updates and installables in the Mac App Store. There's a nifty
# command line interface to it that we can use to just install everything (like
# OS and iTunes updates), so yeah, let's do that.
# Echo the command for visibility, then install every available macOS update.
echo "› softwareupdate -i -a"
softwareupdate -i -a
|
edahlseng/dotfiles
|
macos/install.sh
|
Shell
|
mit
| 336 |
#!/bin/bash
set -e
# Interactive first-run setup: record the git identity and the git-lfs
# preference as *.config files beside this script.
cfg_dir=$(dirname "$0")
echo "What is your git account's email address?"
# -r keeps backslashes literal in the typed answers
read -r email
echo "$email" > "$cfg_dir"/email.config
echo "What is your name?"
read -r name
echo "$name" > "$cfg_dir"/name.config
echo "Would you also like to install git lfs (Large File Storage)(y/n)?"
read -r installgitlfs
echo "$installgitlfs" > "$cfg_dir"/installgitlfs.config
exit 0
|
IronPeak/BitCrow
|
git/config.sh
|
Shell
|
mit
| 362 |
#!/usr/bin/env bash
# Run fetch.sh unless an instance is already running.  The "[f]etch" bracket
# trick stops grep from matching its own command line (replacing the fragile
# `grep -v grep` filter), and the escaped dot stops e.g. "fetchXsh" matching.
if ps -ef | grep '[f]etch\.sh' ; then
    echo "task is already running"
    exit 0
else
    echo "fetch new changes"
    exec "$SYNC_HOME/fetch.sh"
    exit 0
fi
|
ghasemz/sync
|
rsync.sh
|
Shell
|
mit
| 190 |
#!/bin/bash
set -e
# Place for custom stuff you like to run
# So i can merge your changes without need to build a
# new Vagrantfile
# Restart the Statusengine worker so any configuration changes take effect.
service statusengine restart
|
nook24/statusengine-vagrant
|
install-custom.sh
|
Shell
|
mit
| 163 |
# Copy the stroke-order JSON for every character (Unicode code point, hex)
# used by the app from zh-stroke-data into ./strokes/, in the original order.
for codepoint in \
  5c0f 7d05 5e3d 4f4f 5728 68ee 6797 908a 7684 623f \
  5b50 88e1 5979 5976 53e6 4e00 6709 5929 ff0c 751f \
  75c5 4e86 5abd 53eb 5e36 98df 7269 53bb 770b 7d93 \
  904e 8def 4e0a 9047 5230 96bb 72fc 5973 5b69 59b3 \
  8981 54ea ff1f 6211 7d66 500b 4e3b 610f 5bb6 7136 \
  5f8c 5403 5011 5169 ff01 5f9e 5c4b 898b 8d95 5feb \
  8d77 5e8a 627e 7375 4eba 5e6b 5fd9 53ef 662f 6c92 \
  65bc 4ed6 7a7f 8863 670d 8eba 7b49 4f86 773c 775b \
  597d 5927 5440 56e0 70ba 60f3 6e05 695a 6a23 8033 \
  6735 807d 8072 97f3 7259 9f52 9003 96e2 90a3 9019 \
  6642 5019 548c 4e5f 5bb3 6015 5225 5df2 4e8b
do
  cp ../../zh-stroke-data/json/${codepoint}.json ./strokes/
done
|
ChineseCubes/react-odp
|
src/build/strokes.sh
|
Shell
|
mit
| 5,450 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2436-2
#
# Security announcement date: 2014-12-09 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:08 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - xserver-xorg-core:2:1.11.4-0ubuntu10.16
# - xserver-xorg-core-lts-trusty:2:1.15.1-0ubuntu2~precise4
#
# Last versions recommanded by security team:
# - xserver-xorg-core:2:1.11.4-0ubuntu10.17
# - xserver-xorg-core-lts-trusty:2:1.15.1-0ubuntu2~precise5
#
# CVE List:
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade both X server packages to the latest recommended versions from the
# header (note: newer than the minimum fix versions listed there).
sudo apt-get install --only-upgrade xserver-xorg-core=2:1.11.4-0ubuntu10.17 -y
sudo apt-get install --only-upgrade xserver-xorg-core-lts-trusty=2:1.15.1-0ubuntu2~precise5 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/x86_64/2014/USN-2436-2.sh
|
Shell
|
mit
| 863 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2014:1167
#
# Security announcement date: 2014-09-09 23:10:17 UTC
# Script generation date: 2017-01-13 21:13:22 UTC
#
# Operating System: CentOS 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - kernel-abi-whitelists.noarch:2.6.32-431.29.2.el6
# - kernel-doc.noarch:2.6.32-431.29.2.el6
# - kernel-firmware.noarch:2.6.32-431.29.2.el6
# - kernel.x86_64:2.6.32-431.29.2.el6
# - kernel-debug.x86_64:2.6.32-431.29.2.el6
# - kernel-debug-devel.x86_64:2.6.32-431.29.2.el6
# - kernel-devel.x86_64:2.6.32-431.29.2.el6
# - kernel-headers.x86_64:2.6.32-431.29.2.el6
# - perf.x86_64:2.6.32-431.29.2.el6
# - python-perf.x86_64:2.6.32-431.29.2.el6
#
# Last versions recommanded by security team:
# - kernel-abi-whitelists.noarch:2.6.32-642.13.1.el6
# - kernel-doc.noarch:2.6.32-642.13.1.el6
# - kernel-firmware.noarch:2.6.32-642.13.1.el6
# - kernel.x86_64:2.6.32-642.13.1.el6
# - kernel-debug.x86_64:2.6.32-642.13.1.el6
# - kernel-debug-devel.x86_64:2.6.32-642.13.1.el6
# - kernel-devel.x86_64:2.6.32-642.13.1.el6
# - kernel-headers.x86_64:2.6.32-642.13.1.el6
# - perf.x86_64:2.6.32-642.13.1.el6
# - python-perf.x86_64:2.6.32-642.13.1.el6
#
# CVE List:
# - CVE-2014-0205
# - CVE-2014-3535
# - CVE-2014-3917
# - CVE-2014-4667
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Install the fixed kernel packages listed in the header above.
# NOTE(review): `name.arch-version` is not standard yum argument syntax;
# these generated commands may just install the newest available package —
# verify on a target host.
sudo yum install kernel-abi-whitelists.noarch-2.6.32 -y
sudo yum install kernel-doc.noarch-2.6.32 -y
sudo yum install kernel-firmware.noarch-2.6.32 -y
sudo yum install kernel.x86_64-2.6.32 -y
sudo yum install kernel-debug.x86_64-2.6.32 -y
sudo yum install kernel-debug-devel.x86_64-2.6.32 -y
sudo yum install kernel-devel.x86_64-2.6.32 -y
sudo yum install kernel-headers.x86_64-2.6.32 -y
sudo yum install perf.x86_64-2.6.32 -y
sudo yum install python-perf.x86_64-2.6.32 -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_6/x86_64/2014/CESA-2014:1167.sh
|
Shell
|
mit
| 1,958 |
#!/bin/bash
# Remove a legacy Homebrew installation: delete the old Cellar, prune any
# /usr/local/bin symlinks left dangling by its removal, and drop stale
# executables.
if [ -d /usr/local/Cellar ]; then
  echo "Found Cellar directory under /usr/local/Cellar - removing"
  rm -rf /usr/local/Cellar
fi
# Find and eliminate broken links, under the assumption that they are linked to the old Cellar
if [ -x /usr/local/bin/brew ]; then
  echo "Found brew executable in /usr/local/bin, pruning old links and executables"
  # `! -exec test -e` is the standard broken-symlink test; NUL-delimited
  # output replaces the original's word-splitting `for thing in $(find …)`,
  # which broke on paths with spaces.
  find /usr/local/bin -type l ! -exec test -e {} \; -print0 |
  while IFS= read -r -d '' thing; do
    echo "Removing ${thing}"
    rm -f -- "$thing"
  done
  # Absolute paths: the original's bare `rm -f brew stree genyshell` acted on
  # the caller's cwd, not /usr/local/bin.
  rm -f -- /usr/local/bin/brew /usr/local/bin/stree /usr/local/bin/genyshell
fi
|
discoverydev/my-boxen
|
remove_strange_brew.sh
|
Shell
|
mit
| 570 |
#!/bin/bash
#seat cinemaId=109&hallNo=180&uid=baidumovie&method=getSeatsByCinemaAndHall&time_stamp=1422001556275&enc=824e2fcfdc02564763613db3c75a4bd7
#sold set planId=1422098100000010980&uid=baidumovie&method=getSoldSeats&time_stamp=1422001556350&enc=42953c4914b7861b4dd5ba4ca9df43c3
#analyze
# third_id : planId
|
deevarvar/myLab
|
baidu_code/xingmei/data_api/info/info_test.sh
|
Shell
|
mit
| 320 |
#!/bin/bash
#bash script used to test port watcher
launch_nc() {
    # the pid that binds $PORT should be the grand son of this script pid
    echo "spawning"
    nc -l 3000
}
launch_nc2() {
    echo "spawning2"
    nc -l 3001
}
launch_nc2() { echo "spawning2"; nc -l 3001; }
prepare_nc() {
    # start both listeners as background children of this helper
    launch_nc &
    launch_nc2 &
}
# run the helper itself in the background so the listeners become orphans
prepare_nc &
|
robinmonjo/dock
|
integration/assets/spawn_orphaned.sh
|
Shell
|
mit
| 359 |
#!/bin/bash
# Default AWS provisioning variables for Apollo; every value can be overridden
# from the environment, and the `:?` expansions abort when a required variable
# is unset.  Intended to be sourced by the bootstrap scripts.
# Keeping atlas variable without prefix as it's been shared by consul and tf at the moment.
export ATLAS_TOKEN=${ATLAS_TOKEN:?"Need to set ATLAS_TOKEN non-empty"}
export ATLAS_INFRASTRUCTURE=${ATLAS_INFRASTRUCTURE:-capgemini/apollo}
export TF_VAR_user=${TF_VAR_user:?"Need to set User non-empty"}
export TF_VAR_access_key=${TF_VAR_access_key:?"Need to set TF_VAR_access_key non-empty"}
export TF_VAR_secret_key=${TF_VAR_secret_key:?"Need to set TF_VAR_secret_key non-empty"}
export TF_VAR_key_file=${TF_VAR_key_file:-$HOME/.ssh/apollo_aws_rsa}
export TF_VAR_key_name=${TF_VAR_key_name:-apollo}
# Overrides default folder in Terraform.py inventory.
export TF_VAR_STATE_ROOT="${APOLLO_ROOT}/terraform/aws"
export ANSIBLE_SSH_ARGS="-F ${APOLLO_ROOT}/terraform/${APOLLO_PROVIDER}/ssh.config -q"
# Terraform mappings needs to be statically passed as -var parameters
# so no really needed to export them. Exporting for consitency.
export TF_VAR_atlas_artifact_master=${TF_VAR_atlas_artifact_master:-capgemini/apollo-ubuntu-14.04-amd64}
export TF_VAR_atlas_artifact_slave=${TF_VAR_atlas_artifact_slave:-capgemini/apollo-ubuntu-14.04-amd64}
export TF_VAR_atlas_artifact_version_master=${TF_VAR_atlas_artifact_version_master:-6}
export TF_VAR_atlas_artifact_version_slave=${TF_VAR_atlas_artifact_version_slave:-6}
export TF_VAR_region=${TF_VAR_region:-eu-west-1}
export TF_VAR_master_size=${TF_VAR_master_size:-m3.medium}
export TF_VAR_slave_size=${TF_VAR_slave_size:-m3.medium}
export TF_VAR_slaves=${TF_VAR_slaves:-1}
export TF_VAR_availability_zones=${TF_VAR_availability_zones:-'eu-west-1a,eu-west-1b,eu-west-1c'}
export TF_VAR_public_subnet_availability_zone=${TF_VAR_public_subnet_availability_zone:-'eu-west-1a'}
# Consul datacenter and Mesos cluster default to the chosen AWS region.
export APOLLO_consul_dc=${APOLLO_consul_dc:-$TF_VAR_region}
export APOLLO_mesos_cluster_name=${APOLLO_mesos_cluster_name:-$TF_VAR_region}
|
mehulsbhatt/Apollo
|
bootstrap/aws/config-default.sh
|
Shell
|
mit
| 1,867 |
#!/bin/bash
# Puppet-Diamond
# http://diamond-methods.org/puppet-diamond.html
# http://stackoverflow.com/questions/11258737/restore-git-submodules-from-gitmodules
# Re-register every submodule recorded in .gitmodules (e.g. after the
# .git/modules metadata has been lost).
set -e
git config -f .gitmodules --get-regexp '^submodule\..*\.path$' |
while read -r path_key path
do
    # Anchor the substitution so only the trailing ".path" becomes ".url";
    # the original's unanchored sed would mangle a submodule *name* that
    # itself contained ".path".
    url_key=$(echo "$path_key" | sed 's/\.path$/.url/')
    url=$(git config -f .gitmodules --get "$url_key")
    echo "$url" "$path"
    git submodule add "$url" "$path"
done
|
diamond-org/puppet-diamond
|
bin/get_submodules.sh
|
Shell
|
mit
| 458 |
#!/bin/bash
# Fetch and build the fortran-lib dependency under ./deps.
set -e
# -p (lowercase) creates parent directories as needed; the original's "-P"
# is not a valid mkdir option and made the command fail outright.
mkdir -p ./deps
git clone https://github.com/kramer314/fortran-lib.git ./deps/fortran-lib
cd ./deps/fortran-lib
scons
|
kramer314/1d-vd-test
|
build-deps.sh
|
Shell
|
mit
| 131 |
# check if this is a login shell
[ "$0" = "-bash" ] && export LOGIN_BASH=1
# run bash_profile if this is not a login shell
[ -z "$LOGIN_BASH" ] && source ~/.bash_profile
# load shared shell configuration
source ~/.shrc
# History
export HISTFILE="$HOME/.bash_history"
export HISTCONTROL="ignoredups"
export PROMPT_COMMAND="history -a"
export HISTIGNORE="&:ls:[bf]g:exit"
# enable direnv (if installed) — `command -v` is the portable/builtin
# replacement for the external `which`
command -v direnv >/dev/null 2>&1 && eval "$(direnv hook bash)"
# enable mcfly (if installed)
command -v mcfly >/dev/null 2>&1 && eval "$(mcfly init bash)"
# to avoid non-zero exit code
true
|
mikemcquaid/dotfiles
|
bashrc.sh
|
Shell
|
mit
| 580 |
#!/bin/bash
set -euo pipefail
# Render a link-coverage heatmap: derive the k-mer size and sibling metadata
# file names from the input CSV path, then invoke the R plotting script.
if [[ $# -ne 3 ]]; then
  echo "usage: $0 <link-cov-heatmap.R> <in.csv> <out.pdf>" 1>&2
  exit 1   # exit codes are 0-255; the original's `exit -1` is invalid
fi
#in: data/sample.kK.se.links.csv
#out: plots/sample.kK.se.links.pdf
script=$1
in=$2
out=$3
KMER=$(echo "$in" | grep -oE 'k[0-9]+' | grep -oE '[0-9]+')
CUTOFFFILE=$(echo "$in" | awk '{gsub(/\.links\.csv$/,".links.thresh")}1')
KCOVFILE=$(echo "$in" | awk '{gsub(/\.(se|pe)\.links\.csv$/,".kmercov")}1')
READLENFILE=$(echo "$in" | awk '{gsub(/\.(se|pe)\.links\.csv$/,".readlen")}1')
# Optional metadata files fall back to 0 when absent.
CUTOFF=$(([[ -e "$CUTOFFFILE" ]] && cat "$CUTOFFFILE") || echo 0)
KCOV=$(([[ -e "$KCOVFILE" ]] && cat "$KCOVFILE") || echo 0)
READLEN=$(([[ -e "$READLENFILE" ]] && cat "$READLENFILE") || echo 0)
echo KMER="$KMER"
echo CUTOFFFILE="$CUTOFFFILE"
echo KCOVFILE="$KCOVFILE"
echo READLENFILE="$READLENFILE"
set -o xtrace
"$script" "$in" "$out" "$CUTOFF" "$KMER" "$KCOV" "$READLEN"
|
mcveanlab/mccortex
|
scripts/report/make-link-plot.sh
|
Shell
|
mit
| 852 |
#!/bin/sh
# CocoaPods-generated "Embed Frameworks" build phase: copies each pod
# framework into the app bundle, strips unused architectures, and re-signs.
# Relies on Xcode build-setting environment variables throughout.
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Copy framework $1 (relative to BUILT_PRODUCTS_DIR, or absolute) into the
# app's Frameworks folder, then strip and re-sign it.
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns dont' throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
# Both configurations embed the same framework list.
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-RoundDropMenu_Tests/RoundDropMenu.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-RoundDropMenu_Tests/RoundDropMenu.framework"
fi
|
burntheroad/RoundDropMenu
|
Example/Pods/Target Support Files/Pods-RoundDropMenu_Tests/Pods-RoundDropMenu_Tests-frameworks.sh
|
Shell
|
mit
| 3,568 |
# Install dependencies, migrate, and seed each Rails component in turn.
setup_app() {
    # $1 = component directory, $2 = rake setup namespace
    cd "$1"
    bundle install
    rake db:migrate
    rake "$2":setup
    cd ..
}
setup_app bank bank
setup_app provider provider
setup_app store-system store
# the coordinator has no migrate/setup tasks — install its gems only
cd coordinator
bundle install
fabriciojoc/ws_coordination_protocols_rails
|
setup.sh
|
Shell
|
mit
| 229 |
#!/bin/bash
# Verify that each column of the MNIST training CSV, converted with asc2bin,
# matches the pre-generated reference file ./_test_files/_f<N>.bin.
iter=1
N=1024
bigfile="../../../DATA_SETS/MNIST/mnist/train_data.csv"
while [ "$iter" -le "$N" ]; do
  filename="./_test_files/_f${iter}.bin"
  # Fail fast when a reference file is missing — the original ran `test -f`
  # but silently discarded its result.
  test -f "$filename" || { echo "missing $filename" >&2; exit 1; }
  cut -f "$iter" -d "," "$bigfile" > _temp.csv
  ../../../UTILS/src/asc2bin _temp.csv I2 _temp.bin
  cmp _temp.bin "$filename"
  echo "done $iter"
  # shell arithmetic instead of the original's per-iteration `expr` fork
  iter=$((iter + 1))
done
#for i in {1..1024}
#do
# filename="./_test_files/_f$i.bin"
# cat $bigfile | awk -F"," -v I=$i '{print $I}' > "_temp.csv"
# ../../../UTILS/src/asc2bin _temp.csv I4 _temp.bin
# cmp _temp.bin $filename
# echo "done $i"
#done
|
NerdWalletOSS/Q
|
OPERATORS/LOAD_CSV/test/check_diff.sh
|
Shell
|
mit
| 569 |
#!/bin/bash
# Install rbenv + ruby-build, a fixed Ruby version, and the gems needed
# for provisioning (chef, knife-solo).
DIR="$(cd "$(dirname "$0")"; pwd)"
# install rbenv
if [ -z "$(which rbenv 2> /dev/null)" ]; then
  RBENV_REPO="https://github.com/sstephenson/rbenv.git"
  RUBYBUILD_REPO="https://github.com/sstephenson/ruby-build.git"
  # BUG FIX: tilde does not expand inside quotes, so "~/.rbenv" created a
  # literal directory named '~'. Use $HOME instead.
  RBENV_HOME="$HOME/.rbenv"
  RUBYBUILD_HOME="$RBENV_HOME/plugins/ruby-build"
  [ ! -d "$RBENV_HOME" ] && git clone "$RBENV_REPO" "$RBENV_HOME"
  [ ! -d "$RUBYBUILD_HOME" ] && git clone "$RUBYBUILD_REPO" "$RUBYBUILD_HOME"
  echo "install rbenv and ruby-build."
fi
RUBY_VERSION="2.1.3"
# Install the requested Ruby only if rbenv does not already list it.
if [ "$(rbenv versions | grep -c "$RUBY_VERSION")" -lt 1 ]; then
  rbenv install "$RUBY_VERSION"
  rbenv rehash
fi
GEMS=(chef knife-solo)
for p in "${GEMS[@]}"
do
  if [ "$(gem list --local | grep -c "$p")" -lt 1 ]; then
    gem install "$p" --no-ri --no-rdoc
    rbenv rehash
  fi
done
|
takasing/lifestream
|
bootstrap.sh
|
Shell
|
mit
| 774 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3045-1
#
# Security announcement date: 2016-08-02 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:34 UTC
#
# Operating System: Ubuntu 16.04 LTS
# Architecture: i386
#
# Vulnerable packages fix on version:
# - php7.0-fpm:7.0.8-0ubuntu0.16.04.2
# - libapache2-mod-php7.0:7.0.8-0ubuntu0.16.04.2
# - php7.0-cgi:7.0.8-0ubuntu0.16.04.2
# - php7.0-cli:7.0.8-0ubuntu0.16.04.2
#
# Last versions recommanded by security team:
# - php7.0-fpm:7.0.8-0ubuntu0.16.04.2
# - libapache2-mod-php7.0:7.0.8-0ubuntu0.16.04.3
# - php7.0-cgi:7.0.8-0ubuntu0.16.04.3
# - php7.0-cli:7.0.8-0ubuntu0.16.04.3
#
# CVE List:
# - CVE-2015-4116
# - CVE-2015-8873
# - CVE-2015-8876
# - CVE-2015-8935
# - CVE-2016-5093
# - CVE-2016-5094
# - CVE-2016-5095
# - CVE-2016-5096
# - CVE-2016-5114
# - CVE-2016-5385
# - CVE-2016-5399
# - CVE-2016-5768
# - CVE-2016-5769
# - CVE-2016-5771
# - CVE-2016-5773
# - CVE-2016-5772
# - CVE-2016-6288
# - CVE-2016-6289
# - CVE-2016-6290
# - CVE-2016-6291
# - CVE-2016-6292
# - CVE-2016-6294
# - CVE-2016-6295
# - CVE-2016-6296
# - CVE-2016-6297
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade each affected package to its pinned fixed version, in order.
for package in \
    'php7.0-fpm=7.0.8-0ubuntu0.16.04.2' \
    'libapache2-mod-php7.0=7.0.8-0ubuntu0.16.04.3' \
    'php7.0-cgi=7.0.8-0ubuntu0.16.04.3' \
    'php7.0-cli=7.0.8-0ubuntu0.16.04.3'
do
    sudo apt-get install --only-upgrade "$package" -y
done
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_16.04_LTS/i386/2016/USN-3045-1.sh
|
Shell
|
mit
| 1,616 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2011:1073
#
# Security announcement date: 2011-09-01 16:10:39 UTC
# Script generation date: 2017-01-01 21:10:13 UTC
#
# Operating System: CentOS 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - bash.x86_64:3.2-32.el5
#
# Last versions recommanded by security team:
# - bash.x86_64:3.2-33.el5_11.4
#
# CVE List:
# - CVE-2008-5374
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# NOTE(review): "bash.x86_64-3.2" is an unusual yum package spec for this
# generated script; the conventional form would be "bash-3.2" or a full
# name-version-release.arch string — verify against the generator's intent.
sudo yum install bash.x86_64-3.2 -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_5/x86_64/2011/CESA-2011:1073.sh
|
Shell
|
mit
| 589 |
#!/bin/bash
# Convert every file in an input directory into a gzipped tweet file of
# the same name in the output directory.
#
# Usage: get-tweet-per-user.sh -i <input_dir> -o <output_dir>
while getopts 'i:o:' opt; do
  case $opt in
    i)
      input=$OPTARG
      ;;
    o)
      output=$OPTARG
      ;;
  esac
done
# Iterate with a glob instead of parsing `ls` output (which breaks on
# filenames containing whitespace), and only when the input dir exists.
if [ -n "$input" ] && [ -d "$input" ]; then
  for path in "$input"/*; do
    [ -e "$path" ] || continue
    name=${path##*/}
    ./get-tweet.sh "$path" | gzip > "$output/$name"
  done
fi
|
raphaottoni/twitter-lda
|
preprocess/json2tweet/get-tweet-per-user.sh
|
Shell
|
mit
| 238 |
# Run the ReferenceDataManager async job, passing the contents of the
# token file as the final argument after input.json and output.json.
# Resolve this script's directory so relative paths work from anywhere.
script_dir=$(dirname "$(readlink -f "$0")")
export KB_DEPLOYMENT_CONFIG=$script_dir/../deploy.cfg
WD=/kb/module/work
if [ -f "$WD/token" ]; then
  # Feed the token file to xargs directly instead of `cat file | xargs`
  # (same behavior, one fewer process).
  xargs sh "$script_dir/../bin/run_ReferenceDataManager_async_job.sh" "$WD/input.json" "$WD/output.json" < "$WD/token"
else
  echo "File $WD/token doesn't exist, aborting."
  exit 1
fi
|
kbaseapps/ReferenceDataManager
|
scripts/run_async.sh
|
Shell
|
mit
| 330 |
#!/usr/bin/env bash
# Base16 OceanicNext - Gnome Terminal color scheme install script
# https://github.com/voronianski/oceanic-next-color-scheme
# Allow the caller to override the profile name/slug and the dconf/uuidgen
# binaries via the environment; otherwise use these defaults.
[[ -z "$PROFILE_NAME" ]] && PROFILE_NAME="Base 16 OceanicNext"
[[ -z "$PROFILE_SLUG" ]] && PROFILE_SLUG="base-16-oceanicnext"
[[ -z "$DCONF" ]] && DCONF=dconf
[[ -z "$UUIDGEN" ]] && UUIDGEN=uuidgen
# Write a single key/value under $PROFILE_KEY via dconf.
dset() {
    local key="$1"; shift
    local val="$1"; shift
    # NOTE(review): $type is never assigned on this dconf code path (it is
    # only a local of gset() below), so this branch appears to be dead code
    # carried over from the gconf variant; callers pre-quote string values
    # themselves. Verify before relying on it.
    if [[ "$type" == "string" ]]; then
        val="'$val'"
    fi
    "$DCONF" write "$PROFILE_KEY/$key" "$val"
}
# Because dconf still doesn't have "append"
# Rebuild the dconf list at $1: drop any existing entry containing $2,
# append '$2' (quoted), and write the list back.
dlist_append() {
    local list_key="$1"; shift
    local new_entry="$1"; shift
    local rebuilt
    rebuilt="$(
        {
            "$DCONF" read "$list_key" | tr -d '[]' | tr , "\n" | fgrep -v "$new_entry"
            echo "'$new_entry'"
        } | head -c-1 | tr "\n" ,
    )"
    "$DCONF" write "$list_key" "[$rebuilt]"
}
# Newest versions of gnome-terminal use dconf
if which "$DCONF" > /dev/null 2>&1; then
    # Check that uuidgen is available
    type $UUIDGEN >/dev/null 2>&1 || { echo >&2 "Requires uuidgen but it's not installed. Aborting!"; exit 1; }
    [[ -z "$BASE_KEY_NEW" ]] && BASE_KEY_NEW=/org/gnome/terminal/legacy/profiles:
    # Only act when the profiles: directory is non-empty in dconf.
    if [[ -n "`$DCONF list $BASE_KEY_NEW/`" ]]; then
        # Use a random UUID as the new profile slug when uuidgen works.
        if which "$UUIDGEN" > /dev/null 2>&1; then
            PROFILE_SLUG=`uuidgen`
        fi
        # Pick the profile to copy settings from: the configured default,
        # otherwise the first profile in the list.
        if [[ -n "`$DCONF read $BASE_KEY_NEW/default`" ]]; then
            DEFAULT_SLUG=`$DCONF read $BASE_KEY_NEW/default | tr -d \'`
        else
            DEFAULT_SLUG=`$DCONF list $BASE_KEY_NEW/ | grep '^:' | head -n1 | tr -d :/`
        fi
        DEFAULT_KEY="$BASE_KEY_NEW/:$DEFAULT_SLUG"
        PROFILE_KEY="$BASE_KEY_NEW/:$PROFILE_SLUG"
        # Copy existing settings from default profile
        $DCONF dump "$DEFAULT_KEY/" | $DCONF load "$PROFILE_KEY/"
        # Add new copy to list of profiles
        dlist_append $BASE_KEY_NEW/list "$PROFILE_SLUG"
        # Update profile values with theme options
        dset visible-name "'$PROFILE_NAME'"
        dset palette "['#1B2B34', '#EC5f67', '#99C794', '#FAC863', '#6699CC', '#C594C5', '#5FB3B3', '#C0C5CE', '#65737E', '#F99157', '#343D46', '#4F5B66', '#A7ADBA', '#CDD3DE', '#AB7967', '#D8DEE9']"
        dset background-color "'#1B2B34'"
        dset foreground-color "'#C0C5CE'"
        dset bold-color "'#C0C5CE'"
        dset bold-color-same-as-fg "true"
        dset cursor-colors-set "true"
        dset cursor-background-color "'#C0C5CE'"
        dset cursor-foreground-color "'#1B2B34'"
        dset use-theme-colors "false"
        dset use-theme-background "false"
        # Done via dconf; clean up and skip the gconf fallback below.
        unset PROFILE_NAME
        unset PROFILE_SLUG
        unset DCONF
        unset UUIDGEN
        exit 0
    fi
fi
# Fallback for Gnome 2 and early Gnome 3
[[ -z "$GCONFTOOL" ]] && GCONFTOOL=gconftool
[[ -z "$BASE_KEY" ]] && BASE_KEY=/apps/gnome-terminal/profiles
PROFILE_KEY="$BASE_KEY/$PROFILE_SLUG"
# Set one typed key under $PROFILE_KEY via gconftool.
gset() {
    local value_type="$1"; shift
    local key_name="$1"; shift
    local key_value="$1"; shift
    "$GCONFTOOL" --set --type "$value_type" "$PROFILE_KEY/$key_name" -- "$key_value"
}
# Because gconftool doesn't have "append"
# Rebuild the gconf list at $2 (element type $1): drop entries containing
# $3, append $3, and write the list back.
glist_append() {
    local type="$1"; shift
    local key="$1"; shift
    local val="$1"; shift
    local entries="$(
        {
            "$GCONFTOOL" --get "$key" | tr -d '[]' | tr , "\n" | fgrep -v "$val"
            echo "$val"
        } | head -c-1 | tr "\n" ,
    )"
    "$GCONFTOOL" --set --type list --list-type $type "$key" "[$entries]"
}
# Append the Base16 profile to the profile list
glist_append string /apps/gnome-terminal/global/profile_list "$PROFILE_SLUG"
gset string visible_name "$PROFILE_NAME"
# NOTE(review): palette is assigned twice; the second (dconf-style list)
# value immediately overwrites the first (colon-separated gconf) one.
# One of these lines is almost certainly wrong for gconf — verify.
gset string palette "#1B2B34:#EC5f67:#99C794:#FAC863:#6699CC:#C594C5:#5FB3B3:#C0C5CE:#65737E:#EC5f67:#99C794:#FAC863:#6699CC:#C594C5:#5FB3B3:#D8DEE9"
gset string palette "['#1B2B34', '#EC5f67', '#99C794', '#FAC863', '#6699CC', '#C594C5', '#5FB3B3', '#C0C5CE', '#65737E', '#F99157', '#343D46', '#4F5B66', '#A7ADBA', '#CDD3DE', '#AB7967', '#D8DEE9']"
gset string background_color "#1B2B34"
gset string foreground_color "#C0C5CE"
gset string bold_color "#C0C5CE"
gset bool bold_color_same_as_fg "true"
# NOTE(review): the three cursor-* keys below use dash-separated names and
# dconf-style quoting, unlike the underscore_names used elsewhere in this
# gconf section — they look pasted from the dconf branch; verify.
gset bool cursor-colors-set "true"
gset string cursor-background-color "'#C0C5CE'"
gset string cursor-foreground-color "'#1B2B34'"
gset bool use_theme_colors "false"
gset bool use_theme_background "false"
unset PROFILE_NAME
unset PROFILE_SLUG
unset DCONF
unset UUIDGEN
|
dcelasun/dotfiles
|
config/base16-gnome-terminal/color-scripts/base16-oceanicnext.sh
|
Shell
|
mit
| 4,385 |
#!/bin/sh
# Generators for the "Sony - PlayStation" DAT and metadata files, driven by
# generate-dat.py. Helpers take: $1 abbreviated name, $2 full system name,
# $3 numeric system id.
SYSTEMID_PS1=38
ABBREV_SYSTEMNAME_PS1="PSX"
SYSTEMNAME_PS1="Sony - PlayStation"
# Templates describing the fields emitted for each DAT variant.
GEN_DAT_PS1='{"game":{"rom":{"serial":"romSerial","image":"romFileName"},"name":"romExtensionlessFileName"}}'
GEN_DEVELOPER_PS1='{"game":{"rom":{"serial":"romSerial","image":"romFileName"},"name":"romExtensionlessFileName","developer":"releaseDeveloper"}}'
GEN_PUBLISHER_PS1='{"game":{"rom":{"serial":"romSerial","image":"romFileName"},"name":"romExtensionlessFileName","publisher":"releasePublisher"}}'
GEN_RELEASEYEAR_PS1='{"game":{"rom":{"serial":"romSerial","image":"romFileName"},"name":"romExtensionlessFileName","releaseyear":"Year"}}'
GEN_RELEASEMONTH_PS1='{"game":{"rom":{"serial":"romSerial","image":"romFileName"},"name":"romExtensionlessFileName","releasemonth":"Month"}}'
# Generate the base DAT file into dat/.
_gen_dat_ps1() {
  echo "--- ${2} - DAT"
  # NOTE(review): unlike the sibling generators below, this invocation does
  # not pass -s before the system id — confirm this is intentional.
  $PYTHON ./generate-dat.py ${3} ${GEN_DAT_PS1} "${1}"
  mv "${1}.dat" "dat/${2}.dat"
}
gen_dat_ps1() {
  _gen_dat_ps1 "${ABBREV_SYSTEMNAME_PS1}" "${SYSTEMNAME_PS1}" ${SYSTEMID_PS1}
}
# Generate the developer metadata DAT into metadat/developer/.
_gen_developer_ps1() {
  echo "--- ${2} - DAT Developer"
  $PYTHON ./generate-dat.py -s ${3} ${GEN_DEVELOPER_PS1} "${1}"
  mv "${1}.dat" "metadat/developer/${2}.dat"
}
gen_developer_ps1() {
  _gen_developer_ps1 "${ABBREV_SYSTEMNAME_PS1}" "${SYSTEMNAME_PS1}" ${SYSTEMID_PS1}
}
# Generate the publisher metadata DAT into metadat/publisher/.
_gen_publisher_ps1() {
  echo "--- ${2} - DAT Publisher"
  $PYTHON ./generate-dat.py -s ${3} ${GEN_PUBLISHER_PS1} "${1}"
  mv "${1}.dat" "metadat/publisher/${2}.dat"
}
gen_publisher_ps1() {
  _gen_publisher_ps1 "${ABBREV_SYSTEMNAME_PS1}" "${SYSTEMNAME_PS1}" ${SYSTEMID_PS1}
}
# Generate the release-year metadata DAT into metadat/releaseyear/.
_gen_releaseyear_ps1() {
  echo "--- ${2} - DAT Releaseyear"
  $PYTHON ./generate-dat.py -s ${3} ${GEN_RELEASEYEAR_PS1} "${1}"
  mv "${1}.dat" "metadat/releaseyear/${2}.dat"
}
gen_releaseyear_ps1() {
  _gen_releaseyear_ps1 "${ABBREV_SYSTEMNAME_PS1}" "${SYSTEMNAME_PS1}" ${SYSTEMID_PS1}
}
# Generate the release-month metadata DAT into metadat/releasemonth/.
_gen_releasemonth_ps1() {
  echo "--- ${2} - DAT Releasemonth"
  $PYTHON ./generate-dat.py -s ${3} ${GEN_RELEASEMONTH_PS1} "${1}"
  mv "${1}.dat" "metadat/releasemonth/${2}.dat"
}
gen_releasemonth_ps1() {
  _gen_releasemonth_ps1 "${ABBREV_SYSTEMNAME_PS1}" "${SYSTEMNAME_PS1}" ${SYSTEMID_PS1}
}
# Generate all four metadata DATs in one generate-dat.py run; the tool
# writes "${1}_1.dat" .. "${1}_4.dat" in template order.
_gen_ps1() {
  $PYTHON ./generate-dat.py -s ${3} "${GEN_DEVELOPER_PS1}" "${GEN_PUBLISHER_PS1}" "${GEN_RELEASEYEAR_PS1}" "${GEN_RELEASEMONTH_PS1}" "${1}"
  mv "${1}_1.dat" "metadat/developer/${2}.dat"
  mv "${1}_2.dat" "metadat/publisher/${2}.dat"
  mv "${1}_3.dat" "metadat/releaseyear/${2}.dat"
  mv "${1}_4.dat" "metadat/releasemonth/${2}.dat"
}
# Entry point: base DAT plus all metadata DATs.
gen_ps1() {
  gen_dat_ps1
  _gen_ps1 "${ABBREV_SYSTEMNAME_PS1}" "${SYSTEMNAME_PS1}" ${SYSTEMID_PS1}
}
|
libretro/libretro-dat-pull
|
scripts/Sony - PlayStation.sh
|
Shell
|
mit
| 2,625 |
#!/bin/bash
# ARK Server Tools installer: parses options, then performs either a
# per-user install (--me) or a system-wide install for a given steam user.
userinstall=no
steamcmd_user=
showusage=no
migrateconfig=no
installservice=no
# Option parser: each --foo=VALUE long option also has a two-argument
# "--foo VALUE" form; a bare word is taken as the steamcmd user name.
while [ -n "$1" ]; do
  case "$1" in
    --me)
      userinstall=yes
      steamcmd_user="--me"
      ;;
    -h|--help)
      showusage=yes
      break
      ;;
    --prefix=*)
      PREFIX="${1#--prefix=}"
      ;;
    --prefix)
      PREFIX="$2"
      shift
      ;;
    --exec-prefix=*)
      EXECPREFIX="${1#--exec-prefix=}"
      ;;
    --exec-prefix)
      EXECPREFIX="$2"
      shift
      ;;
    --data-prefix=*)
      DATAPREFIX="${1#--data-prefix=}"
      ;;
    --data-prefix)
      DATAPREFIX="$2"
      shift
      ;;
    --install-root=*)
      INSTALL_ROOT="${1#--install-root=}"
      ;;
    --install-root)
      INSTALL_ROOT="$2"
      shift
      ;;
    --bindir=*)
      BINDIR="${1#--bindir=}"
      ;;
    --bindir)
      BINDIR="$2"
      shift
      ;;
    --libexecdir=*)
      LIBEXECDIR="${1#--libexecdir=}"
      ;;
    --libexecdir)
      LIBEXECDIR="$2"
      shift
      ;;
    --datadir=*)
      DATADIR="${1#--datadir=}"
      ;;
    --datadir)
      DATADIR="$2"
      shift
      ;;
    --migrate-config)
      migrateconfig=yes
      ;;
    --install-service)
      installservice=yes
      ;;
    -*)
      echo "Invalid option '$1'"
      showusage=yes
      break;
      ;;
    *)
      # A non-option argument must name an existing system user.
      if [ -n "$steamcmd_user" ]; then
        echo "Multiple users specified"
        showusage=yes
        break;
      elif getent passwd "$1" >/dev/null 2>&1; then
        steamcmd_user="$1"
      else
        echo "Invalid user '$1'"
        showusage=yes
        break;
      fi
      ;;
  esac
  shift
done
# A user-install run as root would drop files into root's home; refuse.
if [ "$userinstall" == "yes" -a "$UID" -eq 0 ]; then
  echo "Refusing to perform user-install as root"
  showusage=yes
fi
if [ "$showusage" == "no" -a -z "$steamcmd_user" ]; then
  echo "No user specified"
  showusage=yes
fi
# Derive install locations: user installs live under $HOME; system
# installs default to /usr/local with config under /etc/arkmanager.
if [ "$userinstall" == "yes" ]; then
  PREFIX="${PREFIX:-${HOME}}"
  EXECPREFIX="${EXECPREFIX:-${PREFIX}}"
  DATAPREFIX="${DATAPREFIX:-${PREFIX}/.local/share}"
  CONFIGFILE="${PREFIX}/.arkmanager.cfg"
  INSTANCEDIR="${PREFIX}/.config/arkmanager/instances"
else
  PREFIX="${PREFIX:-/usr/local}"
  EXECPREFIX="${EXECPREFIX:-${PREFIX}}"
  DATAPREFIX="${DATAPREFIX:-${PREFIX}/share}"
  CONFIGFILE="/etc/arkmanager/arkmanager.cfg"
  INSTANCEDIR="/etc/arkmanager/instances"
fi
BINDIR="${BINDIR:-${EXECPREFIX}/bin}"
LIBEXECDIR="${LIBEXECDIR:-${EXECPREFIX}/libexec/arkmanager}"
DATADIR="${DATADIR:-${DATAPREFIX}/arkmanager}"
# Print usage (with the resolved defaults) and bail on any parse error.
if [ "$showusage" == "yes" ]; then
  echo "Usage: ./install.sh {<user>|--me} [OPTIONS]"
  echo "You must specify your system steam user who own steamcmd directory to install ARK Tools."
  echo "Specify the special used '--me' to perform a user-install."
  echo
  echo "<user>            The user arkmanager should be run as"
  echo
  echo "Option            Description"
  echo "--help, -h        Show this help text"
  echo "--me              Perform a user-install"
  echo "--prefix          Specify the prefix under which to install arkmanager"
  echo "                  [PREFIX=${PREFIX}]"
  echo "--exec-prefix     Specify the prefix under which to install executables"
  echo "                  [EXECPREFIX=${EXECPREFIX}]"
  echo "--data-prefix     Specify the prefix under which to install support files"
  echo "                  [DATAPREFIX=${DATAPREFIX}]"
  echo "--install-root    Specify the staging directory in which to perform the install"
  echo "                  [INSTALL_ROOT=${INSTALL_ROOT}]"
  echo "--bindir          Specify the directory under which to install executables"
  echo "                  [BINDIR=${BINDIR}]"
  echo "--libexecdir      Specify the directory under which to install executable support files"
  echo "                  [LIBEXECDIR=${LIBEXECDIR}]"
  echo "--datadir         Specify the directory under which to install support files"
  echo "                  [DATADIR=${DATADIR}]"
  echo "--install-service"
  echo "                  Install the service"
  exit 1
fi
# Perform the install. The two branches mirror each other: copy the
# arkmanager script and uninstaller, create log/instance directories,
# stage config templates, and (system installs only) optionally register
# a boot service for whichever init system is detected.
if [ "$userinstall" == "yes" ]; then
  # Copy arkmanager to ~/bin
  mkdir -p "${INSTALL_ROOT}${BINDIR}"
  cp arkmanager "${INSTALL_ROOT}${BINDIR}/arkmanager"
  chmod +x "${INSTALL_ROOT}${BINDIR}/arkmanager"
  # Create a folder in ~/.local/share to store arkmanager support files
  mkdir -p "${INSTALL_ROOT}${DATADIR}"
  # Copy the uninstall script to ~/.local/share/arkmanager
  cp uninstall-user.sh "${INSTALL_ROOT}${DATADIR}/arkmanager-uninstall.sh"
  chmod +x "${INSTALL_ROOT}${DATADIR}/arkmanager-uninstall.sh"
  # Bake the resolved install paths into the uninstaller.
  sed -i -e "s|^BINDIR=.*|BINDIR=\"${BINDIR}\"|" \
         -e "s|^DATADIR=.*|DATADIR=\"${DATADIR}\"|" \
      "${INSTALL_ROOT}${DATADIR}/arkmanager-uninstall.sh"
  # Create a folder in ~/logs to let Ark tools write its own log files
  mkdir -p "${INSTALL_ROOT}${PREFIX}/logs/arktools"
  # Create a folder in ~/.config/arkamanger to hold instance configs
  mkdir -p "${INSTALL_ROOT}${INSTANCEDIR}"
  # Copy example instance config
  cp instance.cfg.example "${INSTALL_ROOT}/${INSTANCEDIR}/instance.cfg.example"
  # Change the defaults in the new instance config template
  sed -i -e "s|\"/home/steam|\"${PREFIX}|" \
      "${INSTALL_ROOT}${INSTANCEDIR}/instance.cfg.example"
  # Copy arkmanager.cfg to ~/.arkmanager.cfg.NEW
  cp arkmanager.cfg "${INSTALL_ROOT}${CONFIGFILE}.example"
  # Change the defaults in the new config file
  sed -i -e "s|^steamcmd_user=\"steam\"|steamcmd_user=\"--me\"|" \
         -e "s|\"/home/steam|\"${PREFIX}|" \
         -e "s|/var/log/arktools|${PREFIX}/logs/arktools|" \
         -e "s|^install_bindir=.*|install_bindir=\"${BINDIR}\"|" \
         -e "s|^install_libexecdir=.*|install_libexecdir=\"${LIBEXECDIR}\"|" \
         -e "s|^install_datadir=.*|install_datadir=\"${DATADIR}\"|" \
      "${INSTALL_ROOT}${CONFIGFILE}.example"
  # Copy arkmanager.cfg to ~/.arkmanager.cfg if it doesn't already exist
  if [ -f "${INSTALL_ROOT}${CONFIGFILE}" ]; then
    # Existing config: migrate (in place or into *.NEW copies), warn, and
    # exit 2 so callers can detect the "review your config" case.
    SUFFIX=
    if [ "$migrateconfig" = "no" ]; then
      SUFFIX=".NEW"
      cp "${INSTALL_ROOT}${CONFIGFILE}" "${INSTALL_ROOT}${CONFIGFILE}${SUFFIX}"
    fi
    bash ./migrate-config.sh "${INSTALL_ROOT}${CONFIGFILE}${SUFFIX}"
    bash ./migrate-main-instance.sh "${INSTALL_ROOT}${CONFIGFILE}${SUFFIX}" "${INSTALL_ROOT}${INSTANCEDIR}/main.cfg${SUFFIX}"
    echo "A previous version of ARK Server Tools was detected in your system, your old configuration was not overwritten. You may need to manually update it."
    echo "A copy of the new configuration file was included in '${CONFIGFILE}.NEW'. Make sure to review any changes and update your config accordingly!"
    exit 2
  else
    cp -n "${INSTALL_ROOT}${CONFIGFILE}.example" "${INSTALL_ROOT}${CONFIGFILE}"
    cp -n "${INSTALL_ROOT}/${INSTANCEDIR}/instance.cfg.example" "${INSTALL_ROOT}/${INSTANCEDIR}/main.cfg"
  fi
else
  # Copy arkmanager to /usr/bin and set permissions
  cp arkmanager "${INSTALL_ROOT}${BINDIR}/arkmanager"
  chmod +x "${INSTALL_ROOT}${BINDIR}/arkmanager"
  # Copy the uninstall script to ~/.local/share/arkmanager
  mkdir -p "${INSTALL_ROOT}${LIBEXECDIR}"
  cp uninstall.sh "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager-uninstall.sh"
  chmod +x "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager-uninstall.sh"
  sed -i -e "s|^BINDIR=.*|BINDIR=\"${BINDIR}\"|" \
         -e "s|^LIBEXECDIR=.*|LIBEXECDIR=\"${LIBEXECDIR}\"|" \
         -e "s|^DATADIR=.*|DATADIR=\"${DATADIR}\"|" \
      "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager-uninstall.sh"
  if [ "$installservice" = "yes" ]; then
    # Copy arkdaemon to /etc/init.d ,set permissions and add it to boot
    # Init-system detection: LSB (Debian), then RHEL, then OpenRC, then
    # bare systemd; Debian/RHEL prefer systemd when both are present.
    if [ -f /lib/lsb/init-functions ]; then
      # on debian 8, sysvinit and systemd are present. If systemd is available we use it instead of sysvinit
      if [ -f /etc/systemd/system.conf ]; then # used by systemd
        mkdir -p "${INSTALL_ROOT}${LIBEXECDIR}"
        cp systemd/arkmanager.init "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
        sed -i "s|^DAEMON=\"/usr/bin/|DAEMON=\"${BINDIR}/|" "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
        chmod +x "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
        cp systemd/arkmanager.service "${INSTALL_ROOT}/etc/systemd/system/arkmanager.service"
        sed -i "s|=/usr/libexec/arkmanager/|=${LIBEXECDIR}/|" "${INSTALL_ROOT}/etc/systemd/system/arkmanager.service"
        cp systemd/[email protected] "${INSTALL_ROOT}/etc/systemd/system/[email protected]"
        sed -i "s|=/usr/bin/|=${BINDIR}/|;s|=steam$|=${steamcmd_user}|" "${INSTALL_ROOT}/etc/systemd/system/[email protected]"
        # Only touch the live systemd when not staging into INSTALL_ROOT.
        if [ -z "${INSTALL_ROOT}" ]; then
          systemctl daemon-reload
          systemctl enable arkmanager.service
          echo "Ark server will now start on boot, if you want to remove this feature run the following line"
          echo "systemctl disable arkmanager.service"
        fi
      else # systemd not present, so use sysvinit
        cp lsb/arkdaemon "${INSTALL_ROOT}/etc/init.d/arkmanager"
        chmod +x "${INSTALL_ROOT}/etc/init.d/arkmanager"
        sed -i "s|^DAEMON=\"/usr/bin/|DAEMON=\"${BINDIR}/|" "${INSTALL_ROOT}/etc/init.d/arkmanager"
        # add to startup if the system use sysinit
        if [ -x /usr/sbin/update-rc.d -a -z "${INSTALL_ROOT}" ]; then
          update-rc.d arkmanager defaults
          echo "Ark server will now start on boot, if you want to remove this feature run the following line"
          echo "update-rc.d -f arkmanager remove"
        fi
      fi
    elif [ -f /etc/rc.d/init.d/functions ]; then
      # on RHEL 7, sysvinit and systemd are present. If systemd is available we use it instead of sysvinit
      if [ -f /etc/systemd/system.conf ]; then # used by systemd
        mkdir -p "${INSTALL_ROOT}${LIBEXECDIR}"
        cp systemd/arkmanager.init "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
        sed -i "s|^DAEMON=\"/usr/bin/|DAEMON=\"${BINDIR}/|" "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
        chmod +x "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
        cp systemd/arkmanager.service "${INSTALL_ROOT}/etc/systemd/system/arkmanager.service"
        sed -i "s|=/usr/libexec/arkmanager/|=${LIBEXECDIR}/|" "${INSTALL_ROOT}/etc/systemd/system/arkmanager.service"
        cp systemd/[email protected] "${INSTALL_ROOT}/etc/systemd/system/[email protected]"
        sed -i "s|=/usr/bin/|=${BINDIR}/|;s|=steam$|=${steamcmd_user}|" "${INSTALL_ROOT}/etc/systemd/system/[email protected]"
        if [ -z "${INSTALL_ROOT}" ]; then
          systemctl daemon-reload
          systemctl enable arkmanager.service
          echo "Ark server will now start on boot, if you want to remove this feature run the following line"
          echo "systemctl disable arkmanager.service"
        fi
      else # systemd not preset, so use sysvinit
        cp redhat/arkdaemon "${INSTALL_ROOT}/etc/rc.d/init.d/arkmanager"
        chmod +x "${INSTALL_ROOT}/etc/rc.d/init.d/arkmanager"
        sed -i "s@^DAEMON=\"/usr/bin/@DAEMON=\"${BINDIR}/@" "${INSTALL_ROOT}/etc/rc.d/init.d/arkmanager"
        if [ -x /sbin/chkconfig -a -z "${INSTALL_ROOT}" ]; then
          chkconfig --add arkmanager
          echo "Ark server will now start on boot, if you want to remove this feature run the following line"
          echo "chkconfig arkmanager off"
        fi
      fi
    elif [ -f /sbin/runscript ]; then
      cp openrc/arkdaemon "${INSTALL_ROOT}/etc/init.d/arkmanager"
      chmod +x "${INSTALL_ROOT}/etc/init.d/arkmanager"
      sed -i "s@^DAEMON=\"/usr/bin/@DAEMON=\"${BINDIR}/@" "${INSTALL_ROOT}/etc/init.d/arkmanager"
      if [ -x /sbin/rc-update -a -z "${INSTALL_ROOT}" ]; then
        rc-update add arkmanager default
        echo "Ark server will now start on boot, if you want to remove this feature run the following line"
        echo "rc-update del arkmanager default"
      fi
    elif [ -f /etc/systemd/system.conf ]; then # used by systemd
      mkdir -p "${INSTALL_ROOT}${LIBEXECDIR}"
      cp systemd/arkmanager.init "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
      sed -i "s|^DAEMON=\"/usr/bin/|DAEMON=\"${BINDIR}/|" "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
      chmod +x "${INSTALL_ROOT}${LIBEXECDIR}/arkmanager.init"
      cp systemd/arkmanager.service "${INSTALL_ROOT}/etc/systemd/system/arkmanager.service"
      sed -i "s|=/usr/libexec/arkmanager/|=${LIBEXECDIR}/|" "${INSTALL_ROOT}/etc/systemd/system/arkmanager.service"
      cp systemd/[email protected] "${INSTALL_ROOT}/etc/systemd/system/[email protected]"
      sed -i "s|=/usr/bin/|=${BINDIR}/|;s|=steam$|=${steamcmd_user}|" "${INSTALL_ROOT}/etc/systemd/system/[email protected]"
      if [ -z "${INSTALL_ROOT}" ]; then
        systemctl daemon-reload
        systemctl enable arkmanager.service
        echo "Ark server will now start on boot, if you want to remove this feature run the following line"
        echo "systemctl disable arkmanager.service"
      fi
    fi
  fi
  # Create a folder in /var/log to let Ark tools write its own log files
  mkdir -p "${INSTALL_ROOT}/var/log/arktools"
  chown "$steamcmd_user" "${INSTALL_ROOT}/var/log/arktools"
  # Create a folder in /etc/arkmanager to hold instance config files
  mkdir -p "${INSTALL_ROOT}${INSTANCEDIR}"
  chown "$steamcmd_user" "${INSTALL_ROOT}${INSTANCEDIR}"
  # Copy example instance config
  cp instance.cfg.example "${INSTALL_ROOT}${INSTANCEDIR}/instance.cfg.example"
  chown "$steamcmd_user" "${INSTALL_ROOT}${INSTANCEDIR}/instance.cfg.example"
  # Change the defaults in the new instance config template
  sed -i -e "s|\"/home/steam|\"/home/$steamcmd_user|" \
      "${INSTALL_ROOT}${INSTANCEDIR}/instance.cfg.example"
  # Copy arkmanager bash_completion into /etc/bash_completion.d/
  mkdir -p "${INSTALL_ROOT}/etc/bash_completion.d"
  cp bash_completion/arkmanager "${INSTALL_ROOT}/etc/bash_completion.d/arkmanager"
  # Copy arkmanager.cfg inside linux configuation folder if it doesn't already exists
  mkdir -p "${INSTALL_ROOT}/etc/arkmanager"
  chown "$steamcmd_user" "${INSTALL_ROOT}/etc/arkmanager"
  cp arkmanager.cfg "${INSTALL_ROOT}${CONFIGFILE}.example"
  chown "$steamcmd_user" "${INSTALL_ROOT}${CONFIGFILE}.example"
  sed -i -e "s|^steamcmd_user=\"steam\"|steamcmd_user=\"$steamcmd_user\"|" \
         -e "s|\"/home/steam|\"/home/$steamcmd_user|" \
         -e "s|^install_bindir=.*|install_bindir=\"${BINDIR}\"|" \
         -e "s|^install_libexecdir=.*|install_libexecdir=\"${LIBEXECDIR}\"|" \
         -e "s|^install_datadir=.*|install_datadir=\"${DATADIR}\"|" \
      "${INSTALL_ROOT}${CONFIGFILE}.example"
  if [ -f "${INSTALL_ROOT}${CONFIGFILE}" ]; then
    # Existing config: migrate and exit 2 (see user-install branch above).
    SUFFIX=
    if [ "$migrateconfig" = "no" ]; then
      SUFFIX=".NEW"
      cp "${INSTALL_ROOT}${CONFIGFILE}" "${INSTALL_ROOT}${CONFIGFILE}${SUFFIX}"
    fi
    bash ./migrate-config.sh "${INSTALL_ROOT}${CONFIGFILE}${SUFFIX}"
    bash ./migrate-main-instance.sh "${INSTALL_ROOT}${CONFIGFILE}${SUFFIX}" "${INSTALL_ROOT}${INSTANCEDIR}/main.cfg${SUFFIX}"
    echo "A previous version of ARK Server Tools was detected in your system, your old configuration was not overwritten. You may need to manually update it."
    echo "A copy of the new configuration file was included in /etc/arkmanager. Make sure to review any changes and update your config accordingly!"
    exit 2
  else
    cp -n "${INSTALL_ROOT}${CONFIGFILE}.example" "${INSTALL_ROOT}${CONFIGFILE}"
    cp -n "${INSTALL_ROOT}/${INSTANCEDIR}/instance.cfg.example" "${INSTALL_ROOT}/${INSTANCEDIR}/main.cfg"
  fi
fi
exit 0
|
FezVrasta/ark-server-tools
|
tools/install.sh
|
Shell
|
mit
| 15,733 |
#!/bin/sh
# Source: http://developer.android.com/tools/testing/testing_otheride.html#RunTestsCommand
# Run the example Cucumber instrumentation tests on the connected device
# (-w waits for completion, -r requests raw output format).
adb shell am instrument -w -r at.mfellner.android.cucumber.example.test/at.mfellner.cucumber.android.api.CucumberInstrumentation
|
mfellner/cucumber-android
|
cucumber-example-test/run_test.sh
|
Shell
|
mit
| 230 |
#!/usr/bin/env bash
# Set up a "bare" git repository for tracking dotfiles in $HOME.
set -e
WORK_TREE=~
GIT_DIR=~/.baredot
ALIAS_NAME=baredot
# git invocation that uses $HOME as the work tree and ~/.baredot as the
# repository; exposed to the user via the alias below.
CMD="/usr/bin/git --work-tree=$WORK_TREE --git-dir=$GIT_DIR"
ALIAS_CMD="alias $ALIAS_NAME='$CMD'"
# Print the given message to stdout in yellow, then reset the color.
explain() {
  printf '\033[33m'
  echo "$@"
  printf '\033[m'
}
# Echo a command string prefixed with "+", then execute it via eval.
# Quoting the eval argument preserves the command exactly; the original
# unquoted `eval $1` word-split the string and collapsed internal
# whitespace before re-parsing it.
perform() {
  echo "+ $1"
  eval "$1"
}
# With no arguments: initialize a fresh bare repo and tell the user to add
# the alias. With one argument: clone that repository into $GIT_DIR.
if [ $# -eq 0 ]; then
  explain "Initializing a bare git repository for $WORK_TREE at $GIT_DIR:"
  perform "$CMD init"
  # `perform "git init --bare $GIT_DIR"` would also work
  explain "Ensuring that only manually added files are shown in status:"
  perform "$CMD config status.showUntrackedFiles no"
  explain "That's it! Please add the following \`$ALIAS_NAME\` alias to your" \
    "shell startup script to access the repository:"
  printf '\e[32m'
  echo $ALIAS_CMD
  printf '\e[m'
else
  REPO="$1"
  explain "Cloning $REPO into $GIT_DIR:"
  perform "$CMD clone --no-checkout $REPO"
fi
|
baredot/baredot.github.io
|
setup.sh
|
Shell
|
mit
| 930 |
# Install GitHub's app via Homebrew.
# NOTE(review): depending on the Homebrew version this may need to be a
# cask (`brew install --cask github`) — verify the formula name.
brew install github
|
velocityzen/station
|
setup/github.sh
|
Shell
|
mit
| 20 |
#!/bin/bash
# Install Google Chrome stable: add Google's signing key and apt repo,
# then install the package.
# NOTE(review): apt-key is deprecated on newer Debian/Ubuntu releases;
# confirm the target distro still supports it before reuse.
wget -q -O - https://dl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" | sudo tee /etc/apt/sources.list.d/google-chrome.list
sudo apt install -y google-chrome-stable
|
Vorago/dotfiles
|
packages/chrome/install.sh
|
Shell
|
mit
| 267 |
#!/bin/bash
# Run plane processing for the stationary experiment and copy the
# resulting images to Dropbox.
#
# All command-line arguments are forwarded to PlaneProc.py's -f option;
# with no arguments the 'plotting' stage is run. An array replaces the
# original space-joined string so arguments survive intact.
args=("$@")
if [ ${#args[@]} -eq 0 ]; then
    args=(plotting)
fi
python PlaneProc.py -f "${args[@]}" -i ~/DATA/PlaneProcessing/Phased_Array/exp_width_stat_01 -c ~/DATA/PlaneProcessing/Phased_Array/planeproc2_stat.ini -r y
cp ~/DATA/PlaneProcessing/Phased_Array/exp_width_stat_01/Inputimages/*.png ~/Dropbox/PlaneProcessing/Stationary/Inputimages/
cp ~/DATA/PlaneProcessing/Phased_Array/exp_width_stat_01/fittedimages/*.png ~/Dropbox/PlaneProcessing/Stationary/fittedimages/
cp ~/DATA/PlaneProcessing/Phased_Array/exp_width_stat_01/fittederroronlyimages/*.png ~/Dropbox/PlaneProcessing/Stationary/fittederroronlyimages
|
jswoboda/PlaneProcessing
|
runplanestat.sh
|
Shell
|
mit
| 679 |
#!/bin/bash -e
# List and install all available macOS software updates.
function main()
{
    # Pull in header/checkRequire* helpers from the shared library.
    source "$(dirname "${BASH_SOURCE[0]}")/../libraries/util.bash"
    # Validations
    checkRequireMacSystem
    checkRequireRootUser
    # List and Update
    header 'LISTING AVAILABLE SOFTWARE UPDATES'
    softwareupdate --list
    header 'UPDATING SOFTWARE'
    softwareupdate --all --force --install
}
main "${@}"
|
gdbtek/mac-cookbooks
|
tools/update-software.bash
|
Shell
|
mit
| 359 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2012:0109
#
# Security announcement date: 2012-02-15 16:24:41 UTC
# Script generation date: 2016-05-12 18:10:40 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - rhev-hypervisor6.noarch:6.2-20120209.0.el6_2
# - rhev-hypervisor6-tools.noarch:6.2-20120209.0.el6_2
#
# Last versions recommanded by security team:
# - rhev-hypervisor6.noarch:6.7-20150828.0.el6ev
# - rhev-hypervisor6-tools.noarch:6.3-20121012.0.el6_3
#
# CVE List:
# - CVE-2011-4576
# - CVE-2011-4577
# - CVE-2011-4619
# - CVE-2012-0029
# - CVE-2009-5029
# - CVE-2011-4609
# - CVE-2012-0056
# - CVE-2011-4108
# - CVE-2012-0050
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# NOTE(review): "name.noarch-6.7" is an unusual yum package spec for this
# generated script; the conventional form is "name-version" — verify
# against the generator's intent.
sudo yum install rhev-hypervisor6.noarch-6.7 -y
sudo yum install rhev-hypervisor6-tools.noarch-6.3 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2012/RHSA-2012:0109.sh
|
Shell
|
mit
| 970 |
# .bashrc
# Prompt showing user@host, current directory, and the active git branch.
# NOTE(review): __git_ps1 is provided by git's git-prompt.sh; ensure it is
# sourced before this runs or the prompt will emit a command-not-found error.
PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ '
|
shawndfl/fedora-developer
|
roles/developer/files/bashrc.d/git.sh
|
Shell
|
mit
| 51 |
#!/bin/sh
# Install command-line tools, a development server, development tools and
# general apps via Homebrew.
# NOTE(review): flags like --default-names / --override-system-vim were
# removed in newer Homebrew versions — confirm the target brew version.
# cli tools
brew install ack
brew install tree
brew install wget
# BUG FIX: formula name was misspelled "brew-cas" (missing the final 'k').
brew install caskroom/cask/brew-cask
brew install grep --default-names
brew install findutils --default-names
brew install watch
brew install coreutils
# development server setup
brew install nginx
# development tools
brew install git
brew install hub
brew install macvim --override-system-vim
brew install ctags-exuberant
brew install reattach-to-user-namespace
brew install tmux
brew install zsh
brew install highlight
brew install nvm
brew install z
brew install elasticsearch
brew cask install qlmarkdown
brew install the_silver_searcher
#general apps
brew cask install google-chrome
# gitsh
brew tap thoughtbot/formulae
brew install gitsh
|
cnak/dotfiles
|
scripts/brew.sh
|
Shell
|
mit
| 725 |
# Run the express-access-control test image against both supported
# Node.js versions; containers are removed when the tests finish.
docker run -it --rm magnitus/automated-tests:0.10-node-express-access-control;
docker run -it --rm magnitus/automated-tests:4.1-node-express-access-control;
|
Magnitus-/DockerFiles
|
automated-tests/express-access-control/run.sh
|
Shell
|
mit
| 157 |
#!/bin/sh
# Build and archive the macOS (darwin/x64) package of QMK Flasher.
plat=darwin
arch=x64
# dist-common.sh defines output_dir, package_dir, zip_file and check_zip;
# its presence also confirms we are in the project root.
if [ -f ./dist-common.sh ]; then
  . ./dist-common.sh
else
  echo '*** This must be run from the top-level qmk_flasher directory!'
  exit 1
fi
# Build the macOS package
echo '*** Building macOS package'
electron-packager ./ --platform=$plat --arch=$arch \
  --asar.unpackDir='**/{dfu,node_modules/fsevents}' \
  --osx-sign=false \
  --icon=build/icon.iconset \
  --out="$output_dir" \
  --overwrite=true \
  --prune \
  --ignore 'dist/darwin'
# Zip up the OSX package
echo '*** Creating package archive.'
# Subshell keeps the cd local to the archiving step.
(
  cp LICENSE.md "$output_dir"/"$package_dir"
  cd "$output_dir"/"$package_dir"
  mv LICENSE LICENSE.electron.txt
  zip -y -r "../$zip_file" 'QMK Flasher.app' \
    LICENSE.md LICENSE.electron.txt LICENSES.chromium.html
)
check_zip
|
jackhumbert/dfu-programmer-app
|
dist-darwin.sh
|
Shell
|
mit
| 756 |
#!/bin/bash -e
################################################################################################
## @copyright 2016 DennyZhang.com
## Licensed under MIT
## https://raw.githubusercontent.com/DennyZhang/devops_public/tag_v1/LICENSE
##
## File : stop_old_containers.sh
## Author: doungni
## Co-Author : Denny <[email protected]>
## Description : Stop old long-run docker containers, to save OS resource
## --
## Created : <2015-12-03>
## Updated: Time-stamp: <2017-09-04 18:54:39>
##-------------------------------------------------------------------
################################################################################################
# * By Jenkins config
# keep_days : Over a given period of time will be stop
# docker_ip_port: Docker daemon server ip:port
# regular_white_list: Regular expressions are supported
# * By define parameter
# ssh_identity_file ssh_connet white_list running_contianer_names
# stop_container_list flag count_v container_name container_start_sd
# container_start_ts server_current_ts
################################################################################################
. /etc/profile
# Download (once) and source the shared devops library; it provides log,
# ensure_variable_isset and friends used below.
[ -n "$DOWNLOAD_TAG_NAME" ] || export DOWNLOAD_TAG_NAME="tag_v6"
export DOWNLOAD_PREFIX="https://raw.githubusercontent.com/DennyZhang/devops_public/${DOWNLOAD_TAG_NAME}"
if [ ! -f /var/lib/devops/refresh_common_library.sh ]; then
    [ -d /var/lib/devops/ ] || (sudo mkdir -p /var/lib/devops/ && sudo chmod 777 /var/lib/devops)
    wget -O /var/lib/devops/refresh_common_library.sh "$DOWNLOAD_PREFIX/common_library/refresh_common_library.sh"
    chmod 777 /var/lib/devops/refresh_common_library.sh
fi
bash /var/lib/devops/refresh_common_library.sh "3536991806" "/var/lib/devops/devops_common_library.sh" \
     "${DOWNLOAD_PREFIX}/common_library/devops_common_library.sh"
. /var/lib/devops/devops_common_library.sh
################################################################################################
# TODO: Code quality of this file is low, need to refine or even re-write
# Docker client version should newer than 1.7.1
#######################################
# Stop every running container on the current docker daemon that has been
# up longer than keep_days and is not protected by the white list.
# Globals (read):  ssh_connect, daemon_ip, daemon_port, keep_days, white_list
# Globals (write): stop_container - names of the containers stopped this call
# Outputs: progress via log; stops containers over ssh as a side effect.
#######################################
function stop_expired_container() {
    # Reset the per-server result list. The old code never cleared it, so
    # names from a previously processed daemon leaked into the next report.
    stop_container=()

    # Running container names: last column of `docker ps`, header stripped.
    running_container_names=($($ssh_connect docker ps | awk '{print $NF}' | sed '1d'))
    log "Docker daemon: $daemon_ip:$daemon_port current running container list[${#running_container_names[@]}]:\n${running_container_names[*]}"

    for container_name in "${running_container_names[@]}"; do
        # sd = standard date string, ts = unix timestamp.
        # "docker inspect -f" formats output with the given go template.
        container_start_sd=$($ssh_connect docker inspect -f '{{.State.StartedAt}}' "$container_name")
        container_start_ts=$($ssh_connect date +%s -d "$container_start_sd")
        # Take "now" from the remote host so local clock skew cannot
        # misjudge a container's age.
        server_current_ts=$($ssh_connect date +%s)

        # Keep containers younger than keep_days (1 day = 86400 s).
        if [ $((server_current_ts-container_start_ts)) -lt $((keep_days*86400)) ]; then
            continue
        fi

        # flag=1 marks a white-listed (protected) container.
        local flag=0
        if [ ${#white_list[@]} -gt 0 ]; then
            for white_name in "${white_list[@]}"; do
                if [ "$container_name" = "$white_name" ]; then
                    flag=1
                    break
                fi
            done
        fi

        if [ $flag -eq 0 ]; then
            log "Stop Container: [$container_name]"
            $ssh_connect docker stop "$container_name"
            # BUGFIX: the old code declared `local count_v=0` inside this
            # loop, resetting the index every iteration, so
            # stop_container[0] was overwritten and only the LAST stopped
            # container was ever reported. Appending records all of them.
            stop_container+=("$container_name")
        fi
    done
}
############################## Shell Start #####################################################
# Entry point. Expects Jenkins-injected environment variables:
#   docker_ip_port     - space-separated list of "ip:port" docker daemon hosts (required)
#   keep_days          - containers running longer than this many days get stopped
#   regular_white_list - optional space-separated regexes of container names to keep
ssh_identity_file="$HOME/.ssh/id_rsa"
ensure_variable_isset "docker_ip_port parameter must be set" "$docker_ip_port"
# Jenkins parameter judge
if [ "$keep_days" -lt 0 ]; then
log "ERROR: $keep_days must be greater than or equal to 0"
exit 1
fi
# Intentional unquoted expansion: word-split the space-separated string into an array.
docker_ip_port=(${docker_ip_port// / })
if [ -n "$regular_white_list" ]; then
regular_white_list=(${regular_white_list// / })
else
log "Regular white list is empty, will stop over than $keep_days all containers"
fi
for ip_port in "${docker_ip_port[@]}"; do
# Split "ip:port" into its two components (replace ':' with a space, word-split).
daemon_ip_port=(${ip_port//:/ })
daemon_ip=${daemon_ip_port[0]}
daemon_port=${daemon_ip_port[1]}
# Server Ip:Port connect judge
nc_return=$(nc -w 1 "$daemon_ip" "$daemon_port" 1>/dev/null 2>&1 && echo yes || echo no)
if [ "x$nc_return" == "xno" ]; then
log "Error: Can not connect docker daemon server $daemon_ip:$daemon_port"
exit 1
fi
# SSH connect parameter
# NOTE(review): daemon_port is reused as the SSH port here — confirm sshd
# really listens on the docker daemon port on these hosts.
ssh_connect="ssh -p $daemon_port -i $ssh_identity_file -o StrictHostKeyChecking=no root@$daemon_ip"
# Expand each white-list regex against the container names actually running
# on this daemon; `|| true` keeps a no-match grep from aborting the script.
if [ ${#regular_white_list[@]} -gt 0 ]; then
for regular in "${regular_white_list[@]}"; do
regular_list=($($ssh_connect docker ps | awk '{print $NF}' | sed '1d' | grep -e "^$regular"))||true
white_list+=("${regular_list[@]}")
done
log "Docker daemon $daemon_ip:$daemon_port white list[${#white_list[@]}]:\n${white_list[*]}"
fi
# Call stop expired container function
stop_expired_container
log "Docker daemon server: $daemon_ip:$daemon_port operation is completed!"
# Accumulate this daemon's stopped-container names into the final report.
stop_container_list+=("\n${daemon_ip}:${daemon_port} stop container list:\n${stop_container[@]}")
# Empty current ip:port white list
unset 'white_list[@]'
done
# Exit non-zero when anything was stopped — presumably to flag the Jenkins
# build for operator attention; confirm downstream jobs expect this.
if [ ${#stop_container[@]} -gt 0 ]; then
log "${stop_container_list[@]}"
exit 1
else
log "Did not stop any containers"
fi
############################## Shell End #######################################################
|
DennyZhang/devops_public
|
jenkins_scripts/docker/stop_old_containers.sh
|
Shell
|
mit
| 6,065 |
#!/usr/bin/env bash
# Container entrypoint: block until Cassandra answers on its CQL port,
# then launch KairosDB in the foreground.
printf '%s\n' "waiting for cassandra to start"
# FIXME: timeout is not included
wait-for-it xkbkairosdbcassandra:9042
printf '%s\n' "cassandra started"
/opt/kairosdb/bin/kairosdb.sh run
|
xephonhq/xephon-k
|
_legacy/script/kairosdb/node/kairosdb.sh
|
Shell
|
mit
| 191 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.