code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/usr/bin/env zsh
# Core interactive zsh configuration: shell options, history, completion
# styles, key bindings, and plugin/feature loading.

# zsh options
setopt COMBINING_CHARS # Display accented chars as one char.
setopt INTERACTIVE_COMMENTS # Allow comments in shell.
setopt RC_QUOTES # Allow 'A''s' to mean 'A'\''s'.
setopt NO_BEEP # Disable beeps.
setopt LONG_LIST_JOBS # List jobs in the long format.
setopt NOTIFY # Notify about background jobs immediately.
setopt NO_BG_NICE # Do not run background jobs at a lower priority.
setopt NO_HUP # Keep jobs alive on shell exit.
setopt NO_CHECK_JOBS # Disable warning that checks for running jobs.
setopt AUTO_PUSHD # Make `cd` work like `pushd`.
setopt PUSHD_SILENT # Avoid printing the stack after `pushd`/`popd`.
setopt MULTIOS # Write to multiple descriptors.
setopt EXTENDED_GLOB # Make globs more powerful.
setopt NO_CASE_GLOB # Make globs case-insensitive.
setopt COMPLETE_IN_WORD # Complete from both sides of a word.
setopt AUTO_MENU # Show completion menu on a successive tab press.
setopt AUTO_LIST # Automatically list choices on ambiguous completion.
setopt AUTO_PARAM_SLASH # Add a slash automatically after a directory name.
setopt NO_FLOW_CONTROL # Disable start/stop characters.

# zsh history
HISTFILE="$HOME/.zsh_history"
HISTSIZE=10000
SAVEHIST=10000
setopt EXTENDED_HISTORY # Use the ":start:elapsed;command" format for history entries.
setopt INC_APPEND_HISTORY # Append to history file during shell session.
setopt SHARE_HISTORY # Share history between sessions.
setopt HIST_IGNORE_ALL_DUPS # Delete older duplicated events from history.
setopt HIST_FIND_NO_DUPS # Avoid displaying duplicate history entries.
setopt HIST_SAVE_NO_DUPS # Avoid saving duplicate history entries.
setopt HIST_VERIFY # Do not execute history expanded lines.

# Case-insensitive, pattern-word, and substring completion.
zstyle ":completion:*" matcher-list "m:{[:lower:][:upper:]}={[:upper:][:lower:]}" "r:|[._-]=* r:|=*" "l:|=* r:|=*"
# Cache completion
zstyle ":completion::complete:*" use-cache on
# ssh completion
# Build the completion host list from known_hosts, /etc/hosts (plus NIS via
# ypcat, if present), and Host stanzas in ~/.ssh/config, skipping wildcard
# patterns and entries matched by the prezto-style etc-host-ignores list.
zstyle -a ":prezto:module:completion:*:hosts" etc-host-ignores "_etc_host_ignores"
zstyle -e ":completion:*:hosts" hosts 'reply=(
${=${=${=${${(f)"$(cat {/etc/ssh/ssh_,~/.ssh/}known_hosts(|2)(N) 2> /dev/null)"}%%[#| ]*}//\]:[0-9]*/ }//,/ }//\[/ }
${=${(f)"$(cat /etc/hosts(|)(N) <<(ypcat hosts 2> /dev/null))"}%%(\#${_etc_host_ignores:+|${(j:|:)~_etc_host_ignores}})*}
${=${${${${(@M)${(f)"$(cat ~/.ssh/config 2> /dev/null)"}:#Host *}#Host }:#*\**}:#*\?*}}
)'
zstyle ":completion:*:(ssh|scp|rsync):*" tag-order "hosts:-host:host hosts:-domain:domain hosts:-ipaddr:ip\ address *"
zstyle ":completion:*:(scp|rsync):*" group-order users files all-files hosts-domain hosts-host hosts-ipaddr
zstyle ":completion:*:ssh:*" group-order users hosts-domain hosts-host users hosts-ipaddr
# Hide loopback/localhost and malformed candidates from each host group.
zstyle ":completion:*:(ssh|scp|rsync):*:hosts-host" ignored-patterns "*(.|:)*" loopback ip6-loopback localhost ip6-localhost broadcasthost
zstyle ":completion:*:(ssh|scp|rsync):*:hosts-domain" ignored-patterns "<->.<->.<->.<->" "^[-[:alnum:]]##(.[-[:alnum:]]##)##" "*@*"
zstyle ":completion:*:(ssh|scp|rsync):*:hosts-ipaddr" ignored-patterns "^(<->.<->.<->.<->|(|::)([[:xdigit:].]##:(#c,2))##(|%*))" "127.0.0.<->" "255.255.255.255" "::1" "fe80::*"

# zsh functions
# Quote URLs automatically when inserted via bracketed paste.
autoload -Uz bracketed-paste-url-magic
zle -N bracketed-paste bracketed-paste-url-magic

# Source antibody plugins
[[ -f "$HOME/.zsh-plugins.sh" ]] && source "$HOME/.zsh-plugins.sh"

# zsh features
autoload -Uz compinit
compinit
# zmv: pattern-based mass rename; zcp/zln are its copy/link modes.
autoload -Uz zmv
alias zcp="zmv -C"
alias zln="zmv -L"

# bash emulation
autoload -Uz bashcompinit
bashcompinit

# zsh-syntax-highlighting
ZSH_HIGHLIGHT_HIGHLIGHTERS=(main brackets)

# Enable proper backspacing in vi mode
bindkey "^?" backward-delete-char
bindkey "^W" backward-kill-word
bindkey "^H" backward-delete-char
bindkey "^U" backward-kill-line

# zsh-history-substring-search
bindkey "^[[A" history-substring-search-up
bindkey "^[[B" history-substring-search-down
bindkey -M vicmd "k" history-substring-search-up
bindkey -M vicmd "j" history-substring-search-down

# Search backwards and forwards
bindkey -M vicmd "/" history-incremental-search-backward
bindkey -M vicmd "?" history-incremental-search-forward

# Characters that, typed right after an auto-added suffix (e.g. the `/`
# from AUTO_PARAM_SLASH), make zsh replace the suffix with a space
# instead of removing it (`man zshparam`).
ZLE_SPACE_SUFFIX_CHARS=$'&|'
|
mc10/dotfiles
|
zsh/core.zsh
|
Shell
|
mit
| 4,440 |
# Clipboard filter (macOS): replace the clipboard contents with its top 10
# most frequent lines, each prefixed by its occurrence count (`uniq -c`).
# Fix: `head -10` is the obsolescent option form; POSIX specifies `head -n 10`.
pbpaste | sort | uniq -c | sort -rn | head -n 10 | pbcopy
echo "Copied"
|
EvanLovely/clipboard-actions
|
Lines - top 10 unique lines.sh
|
Shell
|
mit
| 70 |
#!/bin/bash -l
#SBATCH
#SBATCH --job-name=glm_3_nodes_20_calls_parallel.sh
#SBATCH --time=03:00:00
#SBATCH --mail-type=begin,end
#SBATCH [email protected]
#SBATCH --nodes=3
#SBATCH --partition=parallel
# Run the GLM lake model 20 times in sequence from the coldlake/fabm example
# directory. The original script repeated the module-load/cd/glm triplet 20
# times verbatim; loading the module and changing directory once is
# equivalent, since both steps are idempotent and the directory never changes.
module load netcdf/intel/4.3.3.1 glm
cd /home-3/[email protected]/work/kaw/GLM_Wrapper/GLM_Executables/examples_2.2/coldlake/fabm || exit 1
for _run in {1..20}; do
    glm
done
|
karoraw1/GLM_Wrapper
|
MARCCTEST/glm_3_nodes_20_calls_parallel.sh
|
Shell
|
mit
| 2,895 |
#!/usr/bin/env bash
# Bootstrap and launch the nasabot app: run its setup step, then serve it
# with gunicorn's aiohttp worker.
# Env: PORT must be set (bind port for gunicorn).
export PROJECT_NAME=nasabot
# ${PYTHONPATH:+...} avoids producing a leading ':' (an empty entry, which
# Python treats as an extra implicit search path) when PYTHONPATH is unset.
export PYTHONPATH=${PYTHONPATH:+${PYTHONPATH}:}$(pwd)
echo "PYTHONPATH"
echo "${PYTHONPATH}"
echo "====================================================="
echo ""
echo " Setup"
echo ""
echo "====================================================="
python ./${PROJECT_NAME}/main.py --setup
echo "====================================================="
echo ""
echo " Start"
echo ""
echo "====================================================="
gunicorn ${PROJECT_NAME}.wsgi:app --bind 0.0.0.0:${PORT} --log-file - --reload --worker-class aiohttp.worker.GunicornWebWorker
|
botstory/nasa-bot
|
scripts/start.sh
|
Shell
|
mit
| 602 |
#! /bin/bash
# Build a local "node.local/ansible:latest" image bundling the host's docker
# client binary (and the libdevmapper it links against) into a Debian-based
# Ansible container, with a docker group matching the host's docker GID.
set -e
DOCKER_BIN=$(which docker)
LIBDEV_BIN=$(ldconfig -p | grep 'libdevmapper.so.1.02' | awk '{print $4}')
LIBDEV_BIN_NAME=$(echo $LIBDEV_BIN | sed 's#.*/##')
TEMP_DIR=tmp-ansible-build
# Fix: portable argument order is `id -g <user>`; `id docker -g` relies on
# GNU-specific option reordering.
DOCKER_GID=$(id -g docker)
mkdir -p $TEMP_DIR
cp $DOCKER_BIN $TEMP_DIR
cp $LIBDEV_BIN $TEMP_DIR
# Unquoted EOF: $DOCKER_BIN / $LIBDEV_BIN_NAME / $DOCKER_GID are expanded
# by this shell before the Dockerfile is written.
cat << EOF > $TEMP_DIR/Dockerfile && docker build -t node.local/ansible:latest $TEMP_DIR
FROM docker.io/library/debian:latest
COPY docker $DOCKER_BIN
COPY $LIBDEV_BIN_NAME /usr/lib/$LIBDEV_BIN_NAME
RUN apt-get update \
&& apt-get install -y python-pip python-dev git sudo vim
RUN pip install PyYAML jinja2 paramiko httplib2
RUN git clone -b stable-2.0.0.1 https://github.com/ansible/ansible.git --recursive \
&& cd ansible \
&& make install
RUN mkdir /etc/ansible \
&& cp /ansible/examples/hosts /etc/ansible/hosts.bak \
&& echo "[local]\\nlocalhost\\n" > /etc/ansible/hosts \
&& echo "[defaults]\\nremote_tmp=/tmp/.ansible/tmp\\n" > /etc/ansible/ansible.cfg
RUN groupadd -g $DOCKER_GID docker \
&& useradd --create-home ansible \
&& usermod -aG ansible,docker ansible \
&& echo "%ansible ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
USER ansible
WORKDIR /home/ansible
EOF
# Cleanup
rm -Rf $TEMP_DIR
|
xynova/docker-autopilot-repo-watcher
|
provision/docker-infra/build-ansible.sh
|
Shell
|
mit
| 1,190 |
#!/bin/bash
set -xv
# Pipeline environment for one SUBARU cluster/filter reduction run, then the
# resample/coadd swarp step driven through parallel_manager.sh.
export SUBARUDIR=/gpfs/slac/kipac/fs1/u/awright/SUBARU/ ; export INSTRUMENT=SUBARU
export bonn=/u/ki/awright/wtgpipeline/
export subdir=/gpfs/slac/kipac/fs1/u/awright/SUBARU/
export cluster=MACS0416-24
export ending="OCFR"
export filter="W-C-RC"
# NOTE(review): both sources truncate the same /tmp/out (plain '>'), so only
# the second command's output survives — confirm that is intended.
. progs.ini > /tmp/out 2>&1
. SUBARU.ini > /tmp/out 2>&1
# instead of : ./parallel_manager.sh ./resample_coadd_swarp_chips_para.sh /u/ki/awright/data/MACS0416-24/W-C-RC SCIENCE OCFR.sub MACS0416-24_good /u/ki/awright/wtgpipeline MACS0416-24_all
#run this:
./parallel_manager.sh ./resample_coadd_swarp_chips_para.sh /u/ki/awright/data/MACS0416-24/W-C-RC SCIENCE OCFR.sub MACS0416-24_good /u/ki/awright/wtgpipeline MACS0416-24_all
|
deapplegate/wtgpipeline
|
adamSep15_test_non-parallel_resamp.sh
|
Shell
|
mit
| 698 |
#!/bin/bash
# Set the session "kodi" as the default X session for the user with UID 1001,
# via the AccountsService D-Bus API.
account_path="/org/freedesktop/Accounts/User1001"
dbus-send --system --type=method_call --print-reply \
    --dest=org.freedesktop.Accounts \
    "$account_path" \
    org.freedesktop.Accounts.User.SetXSession string:kodi
|
tiagoprn/devops
|
shellscripts/utils/set_default_xsession.sh
|
Shell
|
mit
| 255 |
#!/bin/bash
# Stop the HTTP stack during deployment: the PHP FastCGI pool first,
# then the nginx front end (same order as before).
for svc in php5-fpm nginx; do
    sudo service "$svc" stop
done
|
pipindex/flarum
|
deployment_scripts/stop_nginx.sh
|
Shell
|
mit
| 130 |
#!/usr/bin/expect
# Deploy the built frontend artifacts (assets, bower_components, build,
# index.html) to the platform host's dist directory via scp, auto-answering
# the host-key confirmation and password prompts.
#
# SECURITY NOTE(review): the ssh password is embedded here in plaintext and
# is sent by an automated tool; prefer key-based authentication or an agent.
set timeout 3600
set target "[email protected]"
set destdir "/opt/platform/dist/"
set password "Hello1234"

# Copy one file or directory tree and drive the interactive prompts.
proc push {src} {
    global target destdir password
    spawn scp -r $src $target:$destdir
    expect {
        "*yes/no*" {
            send "yes\n";
            exp_continue;
        }
        "*password:*" {
            send "$password\r";
            exp_continue;
        }
    }
}

foreach item {assets bower_components build index.html} {
    push $item
}
|
Hive-Team/venus
|
app/run-pre.sh
|
Shell
|
mit
| 801 |
#!/usr/bin/env bash
# Run the project's Jasmine API specs.
printf '%s\n' "--- Running jasmine tests ---"
jasmine-node tests/spec/api/
|
AdamTorkelsson/WrapMyInfo
|
bin/test.sh
|
Shell
|
mit
| 85 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2013:1786
#
# Security announcement date: 2013-12-04 18:35:58 UTC
# Script generation date: 2017-01-01 21:15:01 UTC
#
# Operating System: Red Hat 6
# Architecture: i386
#
# Vulnerable packages fix on version:
# - antlr-eap6.noarch:2.7.7-17.redhat_4.1.ep6.el6
# - apache-commons-beanutils.noarch:1.8.3-13.redhat_6.1.ep6.el6
# - apache-commons-cli.noarch:1.2-8.redhat_3.1.ep6.el6
# - apache-commons-configuration.noarch:1.6-8.redhat_3.1.ep6.el6
# - apache-cxf.noarch:2.7.7-1.redhat_1.1.ep6.el6
# - apache-cxf-xjc-utils.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - cxf-xjc-boolean.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - cxf-xjc-dv.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - cxf-xjc-ts.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - dom4j-eap6.noarch:1.6.1-20.redhat_6.1.ep6.el6
# - gnu-getopt.noarch:1.0.13-3.redhat_4.1.ep6.el6
# - jacorb-jboss.noarch:2.3.2-12.redhat_5.1.ep6.el6
# - jansi.noarch:1.9-5.redhat_3.1.ep6.el6
# - jaxen.noarch:1.1.3-9.redhat_3.1.ep6.el6
# - jboss-ejb3-ext-api.noarch:2.1.0-1.redhat_1.1.ep6.el6
# - jboss-jacc-api_1.4_spec.noarch:1.0.3-1.Final_redhat_1.1.ep6.el6
# - jboss-remoting3-jmx.noarch:1.1.2-1.Final_redhat_1.1.ep6.el6
# - jbossws-api.noarch:1.0.2-1.Final_redhat_1.2.ep6.el6
# - jbossws-common-tools.noarch:1.2.0-2.Final_redhat_2.1.ep6.el6
# - jettison.noarch:1.3.1-8.redhat_3.1.ep6.el6
# - juddi.noarch:3.1.3-4.redhat_3.1.ep6.el6
# - org.apache.felix.configadmin.noarch:1.2.8-7.redhat_4.2.ep6.el6
# - org.apache.felix.log.noarch:1.0.0-6.redhat_3.1.ep6.el6
# - shrinkwrap-api.noarch:1.1.2-3.redhat_1.1.ep6.el6
# - shrinkwrap-impl-base.noarch:1.1.2-3.redhat_1.1.ep6.el6
# - shrinkwrap-parent.noarch:1.1.2-3.redhat_1.1.ep6.el6
# - shrinkwrap-spi.noarch:1.1.2-3.redhat_1.1.ep6.el6
# - ws-commons-XmlSchema.noarch:2.0.2-8.redhat_3.1.ep6.el6
# - ws-scout.noarch:1.2.6-4.redhat_3.1.ep6.el6
# - wss4j.noarch:1.6.12-1.redhat_1.1.ep6.el6
# - xerces-j2-eap6.noarch:2.9.1-16.redhat_5.1.ep6.el6
# - xjc-utils.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - xml-security.noarch:1.5.5-2.redhat_2.1.ep6.el6
#
# Last versions recommended by security team:
# - antlr-eap6.noarch:2.7.7-17.redhat_4.1.ep6.el6
# - apache-commons-beanutils.noarch:1.8.3-13.redhat_6.1.ep6.el6
# - apache-commons-cli.noarch:1.2-8.redhat_3.1.ep6.el6
# - apache-commons-configuration.noarch:1.6-8.redhat_3.1.ep6.el6
# - apache-cxf.noarch:2.7.7-1.redhat_1.1.ep6.el6
# - apache-cxf-xjc-utils.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - cxf-xjc-boolean.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - cxf-xjc-dv.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - cxf-xjc-ts.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - dom4j-eap6.noarch:1.6.1-20.redhat_6.1.ep6.el6
# - gnu-getopt.noarch:1.0.13-3.redhat_4.1.ep6.el6
# - jacorb-jboss.noarch:2.3.2-12.redhat_5.1.ep6.el6
# - jansi.noarch:1.9-5.redhat_3.1.ep6.el6
# - jaxen.noarch:1.1.3-9.redhat_3.1.ep6.el6
# - jboss-ejb3-ext-api.noarch:2.1.0-1.redhat_1.1.ep6.el6
# - jboss-jacc-api_1.4_spec.noarch:1.0.3-1.Final_redhat_1.1.ep6.el6
# - jboss-remoting3-jmx.noarch:1.1.2-1.Final_redhat_1.1.ep6.el6
# - jbossws-api.noarch:1.0.2-1.Final_redhat_1.2.ep6.el6
# - jbossws-common-tools.noarch:1.2.0-2.Final_redhat_2.1.ep6.el6
# - jettison.noarch:1.3.1-8.redhat_3.1.ep6.el6
# - juddi.noarch:3.1.3-4.redhat_3.1.ep6.el6
# - org.apache.felix.configadmin.noarch:1.2.8-7.redhat_4.2.ep6.el6
# - org.apache.felix.log.noarch:1.0.0-6.redhat_3.1.ep6.el6
# - shrinkwrap-api.noarch:1.1.2-3.redhat_1.1.ep6.el6
# - shrinkwrap-impl-base.noarch:1.1.2-3.redhat_1.1.ep6.el6
# - shrinkwrap-parent.noarch:1.1.2-3.redhat_1.1.ep6.el6
# - shrinkwrap-spi.noarch:1.1.2-3.redhat_1.1.ep6.el6
# - ws-commons-XmlSchema.noarch:2.0.2-8.redhat_3.1.ep6.el6
# - ws-scout.noarch:1.2.6-4.redhat_3.1.ep6.el6
# - wss4j.noarch:1.6.12-1.redhat_1.1.ep6.el6
# - xerces-j2-eap6.noarch:2.9.1-16.redhat_5.1.ep6.el6
# - xjc-utils.noarch:2.6.1-4.redhat_2.1.ep6.el6
# - xml-security.noarch:1.5.5-2.redhat_2.1.ep6.el6
#
# CVE List:
# - CVE-2013-2035
# - CVE-2013-2133
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

# Install the fixed package versions. The original repeated the same
# `sudo yum install ... -y` line 33 times; a loop over the identical
# "<name>.<arch>-<version>" arguments runs the same commands in the same order.
for package in \
    antlr-eap6.noarch-2.7.7 \
    apache-commons-beanutils.noarch-1.8.3 \
    apache-commons-cli.noarch-1.2 \
    apache-commons-configuration.noarch-1.6 \
    apache-cxf.noarch-2.7.7 \
    apache-cxf-xjc-utils.noarch-2.6.1 \
    cxf-xjc-boolean.noarch-2.6.1 \
    cxf-xjc-dv.noarch-2.6.1 \
    cxf-xjc-ts.noarch-2.6.1 \
    dom4j-eap6.noarch-1.6.1 \
    gnu-getopt.noarch-1.0.13 \
    jacorb-jboss.noarch-2.3.2 \
    jansi.noarch-1.9 \
    jaxen.noarch-1.1.3 \
    jboss-ejb3-ext-api.noarch-2.1.0 \
    jboss-jacc-api_1.4_spec.noarch-1.0.3 \
    jboss-remoting3-jmx.noarch-1.1.2 \
    jbossws-api.noarch-1.0.2 \
    jbossws-common-tools.noarch-1.2.0 \
    jettison.noarch-1.3.1 \
    juddi.noarch-3.1.3 \
    org.apache.felix.configadmin.noarch-1.2.8 \
    org.apache.felix.log.noarch-1.0.0 \
    shrinkwrap-api.noarch-1.1.2 \
    shrinkwrap-impl-base.noarch-1.1.2 \
    shrinkwrap-parent.noarch-1.1.2 \
    shrinkwrap-spi.noarch-1.1.2 \
    ws-commons-XmlSchema.noarch-2.0.2 \
    ws-scout.noarch-1.2.6 \
    wss4j.noarch-1.6.12 \
    xerces-j2-eap6.noarch-2.9.1 \
    xjc-utils.noarch-2.6.1 \
    xml-security.noarch-1.5.5
do
    sudo yum install $package -y
done
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/i386/2013/RHSA-2013:1786.sh
|
Shell
|
mit
| 5,831 |
#!/bin/bash
# -*- coding: UTF8 -*-
##
# Bash basics : notes on line endings encoding conversion.
#
# Sources :
# https://help.github.com/articles/dealing-with-line-endings/
# http://stackoverflow.com/a/20653073/2592338
# http://www.unixcl.com/2008/04/linux-flip-command-alternative-of.html
# http://editorconfig.org/
#
# -----------------------------------------------------------------------------
# Ensure Unix-style newlines
# + for Git : both locally and in the repo.

# Install the 'flip' line-ending conversion tool (Debian/Ubuntu).
apt-get install flip -y
# Convert line endings from Windows/DOS (CRLF) to Unix (LF).
flip -u path/to/file.txt
# Batch convert (recursively for all files in current folder) :
find . -type f -exec flip -u {} +
# Git config
# NOTE: without --global this only affects the current repository.
git config core.autocrlf false
|
Paulmicha/snippets
|
Sys-admin/Basics/encoding_line_endings.notes.sh
|
Shell
|
mit
| 770 |
#!/bin/sh
# Build the Android UX simulator target: locate the NDK arm cross toolchain,
# compile the native library in parallel with make, then build the debug APK
# with ant. The ##if/##endif lines are template-preprocessor markers kept
# verbatim for the generator.
set -e
cd "$(dirname "$0")"
##if #(JDK.Directory:IsSet)
export JAVA_HOME="C:/Program Files/Java/jdk1.7.0_79"
##endif
##if #(Ant.Directory:IsSet)
PATH="C:/ant/bin:$PATH"
##endif
# Fail fast if the required build tools are missing.
for cmd in ant make; do
    if ! command -v $cmd > /dev/null 2>&1; then
        echo "ERROR: '$cmd' was not found." >&2
        exit 1
    fi
done
NDK_DIR="C:/Users/Kuro/AppData/Local/VirtualStore/Windows/SysWOW64/android-ndk-r10e"
SDK_DIR="c:/users/kuro/appdata/local/android/sdk"

# Locate the arm cross compilers/strip inside the given prebuilt toolchain
# directory and export them for make. CXX stays empty when nothing matches.
find_toolchain() {
    PREBUILT_DIR="$1"
    CC=$(find "$PREBUILT_DIR" -name arm-linux-androideabi-gcc)
    CXX=$(find "$PREBUILT_DIR" -name arm-linux-androideabi-g++)
    STRIP=$(find "$PREBUILT_DIR" -name arm-linux-androideabi-strip)
    export CC CXX STRIP
}

# Prefer the 4.8 toolchain; fall back to 4.6 (same lookup, deduplicated).
find_toolchain "$NDK_DIR/toolchains/arm-linux-androideabi-4.8/prebuilt"
if [ -z "$CXX" ]; then
    find_toolchain "$NDK_DIR/toolchains/arm-linux-androideabi-4.6/prebuilt"
    if [ -z "$CXX" ]; then
        echo "ERROR: C/C++ compilers was not found in '$PREBUILT_DIR'." >&2
        exit 1
    fi
fi

echo "## 1/2: libAndroid_UX.so"
if [ -f /proc/cpuinfo ]; then
    JOB_COUNT=$(grep -c processor /proc/cpuinfo)
else
    JOB_COUNT=$(sysctl hw.ncpu | cut -d " " -f 2)
fi
# Fix: "$@" (not $*) preserves any make arguments that contain spaces.
make -s -j $JOB_COUNT "$@"
echo ""
echo "## 2/2: Android UX-debug.apk"
ant debug
|
blyk/BlackCode-Fuse
|
AndroidUX/.build/Simulator/Android/build.sh
|
Shell
|
mit
| 1,437 |
# Command line arguments
# -O2 Good optimizations, including malloc
# -O3 Insane optimizations, but excluding malloc
# -Os Reduced code size

# Compile engine to wasm. Current build emits JS glue + wasm with a pre-js
# shim; earlier variants are kept commented for reference.
#emcc main.c -o build/engine.wasm -Os -s WASM=1 -s SIDE_MODULE=1
#emcc main.c -o build/engine.js -Os -s WASM=1
emcc main.c -o build/engine.js -O2 -s WASM=1 --pre-js pre.js
# Copy build files to www
cp build/engine.* ../www/
|
bassjansson/basslive
|
engine/build.sh
|
Shell
|
mit
| 398 |
#!/bin/bash
# Launch one IGC codon simulation: geometry parameter 10.0, replicate 19.
python RunSimulation.py --Geo 10.0 --sim_num 19
|
xji3/IGCCodonSimulation
|
ShFiles/YDR418W_YEL054C_IGCgeo_10.0_sim_19.sh
|
Shell
|
mit
| 60 |
# Build the library in the parent directory and the sample, then run the
# gyro demo binary.
# Fix: abort on the first failed step (set -e); previously a failed build
# still ran a stale (or missing) binary, and a failed `cd` ran make in the
# wrong directory.
set -e
cd ..
make all
cd sample
make all
build/imu_gyro
|
CURocketry/LaunchVehicleController2016
|
sample/build.sh
|
Shell
|
mit
| 49 |
#!/usr/bin/env bash
# thingshubd: run `thingshub` in each configured project directory and
# surface progress via macOS notifications (terminal-notifier) when available.
# Env:
#   THINGSHUBD_CONFIG             - list file, one project directory per line
#   THINGSHUBD_DEBUG              - "true" for verbose diagnostics
#   THINGSHUBD_SHOW_NOTIFICATIONS - "false" to suppress notifications
THINGSHUBD_CONFIG=${THINGSHUBD_CONFIG:-$HOME/.local/thingshubd.list}
THINGSHUBD_DEBUG=${THINGSHUBD_DEBUG:-false}
THINGSHUBD_SHOW_NOTIFICATIONS=${THINGSHUBD_SHOW_NOTIFICATIONS:-true}
PATH="/usr/local/bin:$PATH"
set -eu
use_terminal_notifier=true
command -v terminal-notifier >/dev/null 2>&1 || use_terminal_notifier=false
if [ "$THINGSHUBD_DEBUG" = true ]; then
if [ "$use_terminal_notifier" = true ]; then
>&2 echo "[debug] found terminal-notifier at $(command -v terminal-notifier)"
else
>&2 echo "[debug] terminal-notifier not found"
fi
fi
if [ "$THINGSHUBD_SHOW_NOTIFICATIONS" = false ]; then
if [ "$THINGSHUBD_DEBUG" = true ]; then
>&2 echo "[debug] notifications will be hidden due to THINGSHUBD_SHOW_NOTIFICATIONS"
fi
use_terminal_notifier=false
fi
# Log $1 to stderr and, when enabled, post it as a macOS notification.
show_msg() {
>&2 echo "$1"
if [ "$use_terminal_notifier" = true ]; then
# Escape a leading '[' so terminal-notifier does not misparse the message.
escaped_msg=$(echo "$1" | sed -e '/^\[/ s/^\[/\\\[/')
terminal-notifier -message "$escaped_msg" -title "ThingsHub Sync" -subtitle "$(date +"%A %T")" -sender com.culturedcode.ThingsMac -activate com.culturedcode.ThingsMac
fi
}
if [ ! -f "$THINGSHUBD_CONFIG" ]; then
show_msg "Sync Failed: Config file not found at $THINGSHUBD_CONFIG"
exit 2
fi
verbose_flag=""
if [ "$THINGSHUBD_DEBUG" = true ]; then
verbose_flag="-verbose"
show_msg "Sync is running."
fi
# Each config line is a project directory; a literal "$HOME" in the file is
# expanded to the real home directory here.
while read -r dir; do
dir="${dir//\$HOME/$HOME}"
if [ ! -d "$dir" ]; then
show_msg "Failed to sync $(basename "$dir"): not found"
continue
fi
pushd "$dir" >/dev/null
>&2 echo "Syncing '$dir' ..."
# $verbose_flag intentionally unquoted: when empty it must vanish entirely
# rather than pass an empty argument.
thingshub $verbose_flag
popd >/dev/null
show_msg "Sync complete for $(basename "$dir")"
done <"$THINGSHUBD_CONFIG"
osascript -e "tell application \"Things3\" to show list \"Today\""
|
cdzombak/thingshub
|
thingshubd/thingshubd.sh
|
Shell
|
mit
| 1,757 |
# Generate Eclipse CDT project files with CMake and build.
# usage: build.sh <CMAKE_BUILD_TYPE>   (e.g. Debug, Release)
# Fixes: require the build-type argument (previously an empty type was
# silently configured), quote it, and stop if configuration fails instead
# of running make anyway.
set -e
if [ -z "$1" ]; then
    echo "usage: $0 <CMAKE_BUILD_TYPE>" >&2
    exit 1
fi
cmake -G"Eclipse CDT4 - Unix Makefiles" -DCMAKE_BUILD_TYPE="$1" ..
make -j4
|
phg1024/MultilinearReconstruction
|
eclipse/build.sh
|
Shell
|
mit
| 74 |
#!/bin/bash
# Build (or refresh) a weekly "xenial" base docker image from a debootstrap
# chroot, then build the xenial-gcc6 image on top and smoke-test it.
#%Y Year, %V Week
DATE="$(date +%Y%V)"
[ -f /vagrant/proxy.env ] && source /vagrant/proxy.env

# Upgrade the chroot at /root/xenial and enable the toolchain PPA.
# (This body was duplicated verbatim in two branches of the original.)
upgrade_chroot() {
    cp /etc/resolv.conf /root/xenial/etc/resolv.conf
    chroot /root/xenial apt-get update
    chroot /root/xenial apt-get dist-upgrade -y
    chroot /root/xenial apt-get install -y software-properties-common
    echo | chroot /root/xenial add-apt-repository ppa:ubuntu-toolchain-r/test
    chroot /root/xenial apt-get update
    chroot /root/xenial apt-get dist-upgrade -y
}

BASE="$(docker images -q xenial:${DATE})"
# Fix: the original `[ $BASE ]` word-splits and breaks `test` when more than
# one image ID matches; -n with quotes handles any output safely.
if [ -n "$BASE" ]; then
    echo "skipping: found image ${BASE} for today ${DATE}"
else
    # update the chroot if it exists, restore it from the tarball, or bootstrap it
    if [ -d /root/xenial ]; then
        upgrade_chroot
    elif [ -f /vagrant/xenial.chroot.tar ]; then
        mkdir -p /root/xenial
        tar xf /vagrant/xenial.chroot.tar --numeric-owner -C /root/xenial/
        upgrade_chroot
    else
        debootstrap xenial /root/xenial
        chroot /root/xenial apt-get install -y software-properties-common
        echo | chroot /root/xenial add-apt-repository ppa:ubuntu-toolchain-r/test
        chroot /root/xenial apt-get update
        chroot /root/xenial apt-get dist-upgrade -y
    fi
    #clean chroot
    chroot /root/xenial apt-get clean
    > /root/xenial/etc/resolv.conf
    #create docker image
    pushd /root/xenial
    tar -c . --numeric-owner -f /vagrant/xenial.chroot.tar
    docker import - xenial < /vagrant/xenial.chroot.tar
    docker tag xenial:latest xenial:${DATE}
    docker save xenial:latest > /vagrant/xenial.docker.tar
    popd
fi
#the dockers
pushd /vagrant/dockerfiles
docker build -t xenial-gcc6 xenial-gcc6/.
docker tag xenial-gcc6:latest xenial-gcc6:${DATE}
popd
#simple test
docker run xenial-gcc6:${DATE} gcc --version
#delete <none> images
NONE="$(docker images -q --filter "dangling=true")"
# ${NONE} intentionally unquoted: it may expand to several image IDs.
[ -n "${NONE}" ] && docker rmi -f ${NONE}
|
kikitux/dockerhost-vagrant
|
scripts/xenial.sh
|
Shell
|
mit
| 2,181 |
# Zone-init step: enable the Jenkins SMF service on SmartOS.
# NOTE(review): `log` is not defined here — presumably provided by the
# zoneinit framework that sources this snippet; verify.
log "starting jenkins"
/usr/sbin/svcadm enable jenkins
|
datasets-at/mi-jenkins
|
copy/var/zoneinit/includes/31-jenkins.sh
|
Shell
|
mit
| 57 |
#!/system/xbin/bash
# Android (rooted): switch the USB gadget to mass-storage + adb composite
# mode, then expose the vold-managed partition 179:32 (presumably the
# external SD card — device specific, verify) as the backing file of USB LUN 0.
setprop persist.sys.usb.config mass_storage,adb
echo /dev/block/vold/179:32 > /sys/devices/platform/msm_hsusb/gadget/lun0/file
|
ii/iiphone
|
android_research/runme.sh
|
Shell
|
mit
| 148 |
#!/bin/bash
# subscription-therm-sensors.sh
# Copyright(c) 2016 Bitergia
# Author: Alvaro del Castillo <[email protected]>,
# Alberto Martín <[email protected]>
# MIT Licensed
#
# Restaurant temperature sensors subscription
#
# Subscribes Cygnus to Orion (NGSI v1 subscribeContext) for one restaurant
# entity, so kitchen/dining temperature changes get pushed to Cygnus.

# Resolve service IPs by name (e.g. docker network DNS) via getent.
CYGNUS_HOST=$( getent hosts cygnus | sort -u | awk '{print $1}' )
CYGNUS_PORT=5050
CYGNUS_URL=http://${CYGNUS_HOST}:${CYGNUS_PORT}/notify
ORION_HOST=$( getent hosts orion | sort -u | awk '{print $1}' )
ORION_PORT=1026
ORION_URL=http://${ORION_HOST}:${ORION_PORT}/v1/subscribeContext

# The unquoted heredoc expands ${CYGNUS_URL} into the JSON payload, which is
# piped to curl as the POST body (-d @- reads from stdin).
cat <<EOF | curl ${ORION_URL} -s -S --header 'Content-Type: application/json' --header 'Accept: application/json' --header 'Fiware-Service: tourguide' --header 'Fiware-ServicePath: /#' -d @-
{
"entities": [
{
"type": "Restaurant",
"isPattern": "false",
"id": "0115206c51f60b48b77e4c937835795c33bb953f"
}
],
"attributes": [
"temperature:kitchen",
"temperature:dining"
],
"reference": "${CYGNUS_URL}",
"duration": "P1M",
"notifyConditions": [
{
"type": "ONCHANGE",
"condValues": [
"temperature:kitchen",
"temperature:dining"
]
}
],
"throttling": "PT1S"
}
EOF
|
Fiware/tutorials.TourGuide-App
|
docker/cygnus/subscriptions/subscription-therm-sensors.sh
|
Shell
|
mit
| 1,257 |
# Bash programmable completion for the `taw` command.
# taw computes its own completions: it is re-invoked ($1 is the command name)
# with COMP_WORDS/COMP_CWORD in the environment and _TAW_COMPLETE=complete,
# and its stdout becomes the COMPREPLY candidates (appears to follow the
# click-style completion protocol — verify against taw's implementation).
# NOTE(review): the unquoted $( ... ) relies on word-splitting of the
# program's output, so candidates must not contain whitespace.
_taw_completion() {
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
COMP_CWORD=$COMP_CWORD \
_TAW_COMPLETE=complete $1 ) )
return 0
}
# -o default: fall back to bash's default (filename) completion when empty.
complete -F _taw_completion -o default taw;
|
mkasa/taw
|
taw_completion.sh
|
Shell
|
mit
| 228 |
#!/usr/bin/env bash
# Build the cloudfeaster dev-env docker image from a cloudfeaster source
# tarball.
# usage: build-docker-image.sh <cloudfeaster-tar-gz> <image-name>
set -e

SCRIPT_DIR_NAME="$( cd "$( dirname "$0" )" && pwd )"

if [ $# != 2 ]; then
    echo "usage: $(basename "$0") <cloudfeaster-tar-gz> <image-name>" >&2
    exit 1
fi

CLOUDFEASTER_TAR_GZ=${1:-}
IMAGE_NAME=${2:-}

REPO_ROOT_DIR=$(repo-root-dir.sh)

# -t DAS is the BSD/macOS mktemp fallback when the GNU form fails.
CONTEXT_DIR=$(mktemp -d 2> /dev/null || mktemp -d -t DAS)
# Fix: clean up the temporary build context on every exit path — under
# `set -e` the original leaked it whenever cp/docker build failed.
trap 'rm -rf "${CONTEXT_DIR}"' EXIT

cp "${REPO_ROOT_DIR}/bin/install-chrome.sh" "${CONTEXT_DIR}/."
cp "${REPO_ROOT_DIR}/bin/install-chromedriver.sh" "${CONTEXT_DIR}/."
cp "${CLOUDFEASTER_TAR_GZ}" "${CONTEXT_DIR}/cloudfeaster.tar.gz"

docker build \
    -t "${IMAGE_NAME}" \
    --file "${SCRIPT_DIR_NAME}/Dockerfile" \
    --build-arg "CIRCLE_CI_EXECUTOR=$(get-circle-ci-executor.sh)" \
    "${CONTEXT_DIR}"

exit 0
|
simonsdave/cloudfeaster
|
dockerfiles/dev-env/build-docker-image.sh
|
Shell
|
mit
| 738 |
#!/bin/bash
# Webcam timelapse runner: fetch the current camera frame, optimize it, then
# rebuild rolling webm/mp4 timelapse movies from the most recent frames.
# usage: timelapse.sh <project> [age-in-days]
set -o nounset
set -o errexit

# necessary declarations
BASEDIR=$(dirname $0)
DATE=$(date +%Y_%m_%d-%H_%M_%S)
YEAR=$(date +%Y)
MONTH=$(date +%m)
DAY=$(date +%d)

# Load specified config if it exists; it must define TIMELAPSEPATH and URL.
if [ "$#" -eq 0 ]; then
    echo "No config file argument found"
    exit 2
fi
PROJECT="$1"
CONFIG="$BASEDIR/$PROJECT.conf"
if [ -f "$CONFIG" ]; then
    source "$CONFIG"
else
    echo "Configuration file ($CONFIG) not found"
    exit 2
fi

# Directory layout under the project root.
BASEDIR="$TIMELAPSEPATH/$PROJECT"
TMPDIR="$BASEDIR/tmp"
BASEIMGDIR="$BASEDIR/img"
IMGDIR="$BASEIMGDIR/$YEAR/$MONTH/$DAY"
FILE="$IMGDIR/$DATE.jpg"
VIDDIR="$BASEDIR/video"
WGETTHIS="$(echo $URL)"
LOGFILE="$BASEDIR/$PROJECT.log"
mkdir -p "$TMPDIR"
mkdir -p "$VIDDIR"
mkdir -p "$IMGDIR"

echo "$(date) - Initiating Timelapse run" >> $LOGFILE
echo "$(date) - wget and jpegoptim-run" >> $LOGFILE
wget -nv $WGETTHIS -O $FILE >> $LOGFILE 2>&1
jpegoptim --quiet --max=50 $FILE >> $LOGFILE 2>&1

echo "$(date) - Compiling list of images to use" >> $LOGFILE
AGEARGUMENT=${2:-}
if [ -z "$AGEARGUMENT" ]; then
    AGE=3
elif [ "$AGEARGUMENT" -eq "$AGEARGUMENT" ] 2>/dev/null; then
    AGE=$AGEARGUMENT
else
    # BUG FIX: the original read `echo"$(date) ..."` (no space), which tried
    # to execute a command named by that whole string and aborted the run
    # under errexit with "command not found". Also log it like every other
    # status message.
    echo "$(date) - Age argument poorly specified ($AGEARGUMENT). Setting to 3 days" >> $LOGFILE
    AGE=3
fi
echo "$(date) - Including all files newer than $AGE days of non-0 size." >> $LOGFILE
# Word-splitting of $TIMELAPSEFILES below is intentional (one path per word);
# frame paths therefore must not contain whitespace.
TIMELAPSEFILES=$(find $BASEIMGDIR -type f -size +1k -mtime -$AGE | sort)

echo "$(date) - Starting timelapse-creation" >> $LOGFILE
echo "$(date) - Cropping images if too large" >> $LOGFILE
count=0
for IMG in $TIMELAPSEFILES; do
    NEW=$(printf "FRAME_%05d.jpg" $count)
    count=$((count + 1))
    convert -size 1920x1080+0+0 -gravity center $IMG $TMPDIR/$NEW >> $LOGFILE 2>&1
done

WEBM="$VIDDIR/timelapse.webm"
MP4="$VIDDIR/timelapse.mp4"
echo "$(date) - Image conversion completed. Compiling movies" >> $LOGFILE
echo "$(date) - 1st pass webm" >> $LOGFILE
avconv -i $TMPDIR/FRAME_%05d.jpg -loglevel error -threads 1 -s 1920x1080 -preset libvpx-1080p -b 4800k -pass 1 -an -f webm -y "$WEBM.tmp" >> $LOGFILE 2>&1
echo "$(date) - 2nd pass webm" >> $LOGFILE
avconv -i $TMPDIR/FRAME_%05d.jpg -loglevel error -threads 1 -preset libvpx-1080p -b 4800k -pass 2 -an -f webm -y "$WEBM.tmp" >> $LOGFILE 2>&1
echo "$(date) - webm to mp4" >> $LOGFILE
avconv -i $WEBM.tmp -loglevel error -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" -b 2048k -r 30 -c:a libmp3lame -f mp4 -y "$MP4.tmp" >> $LOGFILE 2>&1
echo "$(date) - Moving temporary files to right places" >> $LOGFILE
mv $WEBM.tmp $WEBM
mv $MP4.tmp $MP4
echo "$(date) - Movie creation completed, removing TMPDIR" >> $LOGFILE
rm -rf $TMPDIR
echo "$(date) - Timelapse run completed" >> $LOGFILE
|
Sjolus/timelapse
|
timelapse.sh
|
Shell
|
mit
| 2,684 |
# Download the Cryptococcus gattii WM276 genome contigs (.fna) from NCBI and
# concatenate them into a single FASTA named after the species.
# Fix: stop on failure — previously a failed mkdir/cd let wget/cat run in the
# wrong directory.
set -e
SPP=Cryptococcus_gattii_WM276
PREFIX=ftp://ftp.ncbi.nih.gov/genomes/Fungi/Cryptococcus_gattii_WM276/
mkdir -p "$SPP"
cd "$SPP"
# The glob is expanded remotely by wget's FTP listing, not by the shell.
wget "${PREFIX}*.fna"
cat *.fna > "../$SPP.fasta"
|
sauloal/cnidaria
|
analysis/data/converters/external/Fungi/Cryptococcus_gattii_WM276.sh
|
Shell
|
mit
| 166 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2313-1
#
# Security announcement date: 2011-09-29 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:18 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - iceweasel:3.5.16-10
#
# Last versions recommended by security team:
# - iceweasel:3.5.16-10
#
# CVE List:
# - CVE-2011-2372
# - CVE-2011-2995
# - CVE-2011-2998
# - CVE-2011-2999
# - CVE-2011-3000
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade (never freshly install) iceweasel to the pinned fixed version.
sudo apt-get install --only-upgrade iceweasel=3.5.16-10 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_6_(Squeeze)/x86_64/2011/DSA-2313-1.sh
|
Shell
|
mit
| 686 |
# Based on http://larryn.blogspot.nl/2012/11/my-way-for-binding-ssh-agent-with-zshell.html
# Start (or reuse) a single ssh-agent, persisting its environment in
# ~/.ssh-env so new shells can attach to the already-running agent.
function ssh-start-agent () {
    # pgrep -x replaces the fragile `ps ax | grep -c "[s]sh-agent"` count:
    # exact process-name match, no self-match trick, no miscounts from
    # unrelated command lines containing "ssh-agent".
    if ! pgrep -x ssh-agent > /dev/null 2>&1; then
        ssh-agent > ~/.ssh-env
        source ~/.ssh-env
        ssh-add
    else
        # Agent already running; attach only if its env file actually exists
        # (the original sourced it unconditionally and errored when missing).
        [ -f ~/.ssh-env ] && source ~/.ssh-env
    fi
}
ssh-start-agent
|
jaapz/zsh-scripts
|
start-ssh-agent.sh
|
Shell
|
mit
| 328 |
#!/bin/bash
# Install the Node.js dependencies for the server.
# Abort on the first failed install instead of silently continuing.
set -e
npm install express
npm install mongodb --mongodb:native
npm install mongoose
npm install jade
|
a8uhnf/do-server
|
install.sh
|
Shell
|
mit
| 106 |
#!/bin/bash
# time is in seconds in the AppleScript, in minutes for shutdown.
# this script works even if no one is logged.
# Schedule a reboot two minutes out, then show a bilingual (EN/FR)
# AppleScript countdown dialog until the reboot moment arrives.
shutdown -r +2
# The quoted 'END' delimiter prevents any shell expansion inside the
# here-doc: the AppleScript source is passed to osascript verbatim.
osascript << 'END'
set endTime to ((current date) + 120)
repeat until (endTime = (current date))
set whatsLeft to ((endTime - (current date)) as number)
if whatsLeft > 60 then
set whatsLeft to (whatsLeft / 60)
display dialog ((whatsLeft as integer) as string) & " minutes left before the computer will reboot. Please save your work.
L'ordinateur va redémarrer dans " & ((whatsLeft as integer) as string) & " minutes. Enregistrez vos documents ouverts, s'il vous plaît. " giving up after 60 buttons {"Refresh"} default button 1
else
display dialog ((whatsLeft as integer) as string) & " seconds left before the computer will reboot. Please save your work.
L'ordinateur va redémarrer dans " & ((whatsLeft as integer) as string) & " secondes. Enregistrez vos documents ouverts, s'il vous plaît. " giving up after 30 buttons {"Refresh"} default button 1
end if
end repeat
END
exit 0
|
cynikl/mac-scripts
|
reboot-script/reboot_dialog.sh
|
Shell
|
mit
| 1,036 |
#!/bin/sh
set -e
# Log and create the frameworks destination directory for this build.
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Location of the Swift runtime dylibs for the active platform/toolchain.
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copy the framework named by $1 into the app's Frameworks folder, then
# strip foreign architectures, re-sign it, and (pre-Xcode 7) embed the
# Swift runtime dylibs it links against.
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Resolve a symlinked framework to its real location before copying.
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
# The executable normally lives inside the .framework bundle; fall back to
# a bare binary at the destination root.
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copy the dSYM bundle of a vendored framework into the dSYM folder,
# skipping silently when the bundle is absent or unreadable.
install_dsym() {
  local dsym_source="$1"
  if [ -r "$dsym_source" ]; then
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${dsym_source}\" \"${DWARF_DSYM_FOLDER_PATH}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${dsym_source}" "${DWARF_DSYM_FOLDER_PATH}"
  fi
}
# Sign $1 with the build's expanded code-sign identity, unless code
# signing is disabled or not required for this build.
code_sign_if_enabled() {
  # [[ ... && ... ]] replaces the deprecated, ambiguous `[ -a ]` chaining.
  if [[ -n "${EXPANDED_CODE_SIGN_IDENTITY}" && "${CODE_SIGNING_REQUIRED}" != "NO" && "${CODE_SIGNING_ALLOWED}" != "NO" ]]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      # Parallel signing: run in the background; the script waits at the end.
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Remove from the fat binary $1 every architecture that does not appear in
# the build's $ARCHS list, reporting what was stripped.
strip_invalid_archs() {
  binary="$1"
  # Architectures present in the binary, as reported by lipo.
  archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
  stripped=""
  for arch in $archs; do
    case "${ARCHS}" in
      *"$arch"*)
        # Wanted for this build; keep it.
        ;;
      *)
        # Strip non-valid architectures in-place.
        lipo -remove "$arch" -output "$binary" "$binary" || exit 1
        stripped="$stripped $arch"
        ;;
    esac
  done
  if [ -n "$stripped" ]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
}
# Install the pod framework for whichever configuration is being built.
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/JLString/JLString.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/JLString/JLString.framework"
fi
# When signing in parallel, wait for the backgrounded codesign jobs.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
lujie001122/JLString
|
Example/Pods/Target Support Files/Pods-JLString_Example/Pods-JLString_Example-frameworks.sh
|
Shell
|
mit
| 4,662 |
#!/bin/sh
set -e
# Log and create the frameworks destination directory for this build.
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Location of the Swift runtime dylibs for the active platform/toolchain.
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Copy the framework named by $1 into the app's Frameworks folder, then
# strip foreign architectures, re-sign it, and (pre-Xcode 7) embed the
# Swift runtime dylibs it links against.
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Resolve a symlinked framework to its real location before copying.
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns dont' throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
# The executable normally lives inside the .framework bundle; fall back to
# a bare binary at the destination root.
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Sign $1 with the build's expanded code-sign identity, unless code
# signing is disabled or not required for this build.
code_sign_if_enabled() {
  if [[ -n "${EXPANDED_CODE_SIGN_IDENTITY}" && "${CODE_SIGNING_REQUIRED}" != "NO" && "${CODE_SIGNING_ALLOWED}" != "NO" ]]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    # BUG FIX: the original wrote ..."$1"" — the inner double quotes ended the
    # enclosing string, so $1 was interpolated unquoted into the eval'd
    # command and broke on paths containing spaces.  Single quotes (as in
    # other CocoaPods-generated copies of this script) keep $1 one word
    # when the command is eval'd below.
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      # Parallel signing: run in the background; the script waits at the end.
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Remove from the fat binary $1 every architecture that does not appear in
# the build's $VALID_ARCHS list, reporting what was stripped.
strip_invalid_archs() {
  binary="$1"
  # Architectures present in the binary, as reported by lipo.
  archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
  stripped=""
  for arch in $archs; do
    case "${VALID_ARCHS}" in
      *"$arch"*)
        # Valid for this build; keep it.
        ;;
      *)
        # Strip non-valid architectures in-place.
        lipo -remove "$arch" -output "$binary" "$binary" || exit 1
        stripped="$stripped $arch"
        ;;
    esac
  done
  if [ -n "$stripped" ]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
}
# Install the pod framework for whichever configuration is being built.
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_framework "$BUILT_PRODUCTS_DIR/GZAdMobSDK/GZAdMobSDK.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_framework "$BUILT_PRODUCTS_DIR/GZAdMobSDK/GZAdMobSDK.framework"
fi
# When signing in parallel, wait for the backgrounded codesign jobs.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
poormonkey/GZAdMobSDK
|
Example/Pods/Target Support Files/Pods-GZAdMobSDK_Example/Pods-GZAdMobSDK_Example-frameworks.sh
|
Shell
|
mit
| 3,715 |
#!/bin/sh
# Distribute the file given as $1 from the current directory to the
# directory $2 on every server listed in iplist.txt.
. /etc/init.d/functions
file="$1" # file to distribute (first argument)
remote_dir="$2" # target directory on the remote servers
if [ $# -ne 2 ];then # print usage unless exactly two arguments were given
# $#: number of positional parameters
# -ne: not equal
echo "usage:$0 argv1 argv2"
# $0: the script's own name
echo "must have two argvs."
exit
fi
for ip in $(cat iplist.txt)
# one IP address per line in iplist.txt
do
scp -P4399 -r -p $file binzai@$ip:~ >/dev/null 2>&1 &&\
# copy the file into binzai's home directory, discarding all output;
ssh -p4399 -t binzai@$ip sudo rsync -avz -P $file $remote_dir >/dev/null 2>&1
# then, over ssh, sudo-rsync it from the home directory into $remote_dir
if [ $? -eq 0 ];then # report success when the scp && ssh chain succeeded
# $?: exit status of the previous command
# -eq: equal
action "$ip is successful." /bin/true
else
action "$ip is failure." /bin/false
fi
done
|
guadeitong/script
|
common/fenfa.sh
|
Shell
|
cc0-1.0
| 1,108 |
#!/bin/bash
# Run the djvu / djvu-mod JaMoPP benchmark five times, collecting one
# summary file per run under ~/mondo-codemodel/results/djvu-mod/.
for i in {1..5}; do
  rm -rf ./toprocess/*
  # -p: 'rm -rf ./toprocess/*' leaves the directory itself in place, so a
  # plain mkdir failed with "File exists" on every iteration.
  mkdir -p ./toprocess/
  cp -r ~/mondo-codemodel/source-projects/djvu ./toprocess/
  rm -rf export
  mkdir export
  rm -rf results
  mkdir results
  ~/4store-graph-driver/scripts/4s-restart.sh
  mongo jamopp --eval "db.dependencies.drop();"
  java -jar ./jamoppdiscoverer-0.0.1-SNAPSHOT.jar | grep -e DepGraph -e ASG -e ImportTime | tee results/sum.txt
  ./import-check.sh
  # Overlay the modified sources and run the incremental pass.
  cp -r ~/mondo-codemodel/source-projects/djvu-mod/* ./toprocess/
  java -jar ./jamoppdiscoverer-0.0.1-SNAPSHOT.jar -modified ./toprocess/modified.txt | grep -e DepGraph -e ASG -e ImportTime | tee -a results/sum.txt
  ./import-check.sh
  mkdir -p ~/mondo-codemodel/results/djvu-mod/
  cp ./results/sum.txt ~/mondo-codemodel/results/djvu-mod/sum$i.txt
  echo djvu-mod $i done
done
mkdir -p ~/mondo-codemodel/exports/djvu-mod/
cp -r ./export/ ~/mondo-codemodel/exports/djvu-mod/
|
FTSRG/mondo-codemodel
|
target/benchmark-mod.sh
|
Shell
|
epl-1.0
| 898 |
#! /bin/sh
# Install the xn-suite Debian package, selecting the i386 or amd64 build
# to match the host, then reset the web admin password and stop the daemon.
ARCH=i386
if [ -e /lib64 ]; then
ARCH=amd64
fi
# The former per-arch case branches were identical except for the arch
# string embedded in the package name, so they are collapsed here.
PKG="xn-suite_0.83-lenny1_${ARCH}.deb"
cd /tmp
wget -c "http://www.xncore.com/download/${PKG}"
# wget -c "http://www.xncore.com/download/xn-client_0.82-lenny1_${ARCH}.deb"
dpkg -i "${PKG}"
rm -f "${PKG}"
chown www-data: -R /usr/share/xn-core/xn-web
# Reset the admin web-UI password for the live CD.
sqlite3 /var/lib/xn-core/xn-web/xn-web.sqlite "UPDATE user SET password = 'live' WHERE username = 'admin'"
/etc/init.d/xn-daemon stop
|
fr34k8/xenlivecd
|
stuff/02-install-xn-suite.sh
|
Shell
|
gpl-2.0
| 1,056 |
#!/bin/bash
# shellcheck disable=SC1003
# shellcheck disable=SC2016
set -euo pipefail
IFS=$'\n\t'
####
#
# CARE archive to Docker image converter.
#
# There is absolutely **no garanty** this works for every CARE archive.
# This essentially writes the archive's rootfs on top of the base's so there
# might be some cases where this breaks.
# Just here to simplify basic conversions.
#
# Prerequisites:
# - a folder containing a CARE archive
# - docker installed
# Usage:
# - ./care2docker.sh </path/to/archive.tgz.bin>
#
# Copyright Jonathan Passerat-Palmbach, 2016
#
####
archive=$(readlink -f "$1")
basedir=$(dirname "${archive}")
pushd "${basedir}"
"${archive}"
popd
archive_name=$(basename "${archive}" | cut -d '.' -f1)
tar cf "${basedir}/${archive_name}_rootfs.tar" --xform="s,$(echo "${basedir}" | cut -d '/' -f 2-)/${archive_name}/rootfs/,," "${basedir}/${archive_name}"/rootfs/*
cat << EOF > "${basedir}"/Dockerfile
FROM debian:jessie
ADD "${archive_name}_rootfs.tar" /
$(echo -n "ENV " ; grep -e \''.*=.*'\'' \\' "${basedir}/${archive_name}/re-execute.sh" |
sed -e "s/.\(.*\)=/\1='/g" |
sed -e '$ s/ \\//g')
WORKDIR $(grep -e '^-w '\''.*'\'' \\' "${basedir}/${archive_name}/re-execute.sh" |
sed -e 's/^-w '\''\(.*\)'\'' \\/\1/g')
ENTRYPOINT ["$(grep -e '\[ $nbargs -ne 0 \] || set -- \\' -A 1 "${basedir}/${archive_name}/re-execute.sh" |
tail -n1 |
sed -e 's/'\''\(.*\)'\'' \\/\1/g'
)"]
EOF
cd "${basedir}" || exit 1
docker build -t "${archive_name}" .
|
proot-me/PRoot
|
contrib/care2docker.sh
|
Shell
|
gpl-2.0
| 1,482 |
# Run the audiobookbay spider for the 'fantasy' category and emit the
# scraped items as JSON on stdout (-o -), suppressing scrapy's log output.
scrapy runspider -a category=fantasy --nolog -o - -t json audiobookbay.py
|
frodeaa/abook-spider
|
json.sh
|
Shell
|
gpl-2.0
| 74 |
#!/bin/bash
# Refresh the local puma3 checkout and print the podspec version line.
echo "cd /Users/zhangjidong/KP/iQiYi_workspace/svn/puma3"
# Abort if the cd fails; otherwise the git commands would run in whatever
# directory the script was launched from.
cd /Users/zhangjidong/KP/iQiYi_workspace/svn/puma3 || exit 1
git checkout .
git pull --rebase
# grep the file directly instead of piping it through 'more'.
grep s.version puma3.podspec
|
idanielz/idanielz.github.io
|
myPodShell/puma3/1_checkVersion.sh
|
Shell
|
gpl-2.0
| 189 |
#!/bin/bash
# Install the system and Python dependencies for OCR4wikisource.
sudo apt-get install djvulibre-bin libdjvulibre21 libtiff-tools mupdf mupdf-tools pdftk poppler-utils git djview
sudo pip install --upgrade google-api-python-client
sudo pip install clint requests wikitools poster oauth2client apiclient
# NOTE(review): apiclient/google-api-python-client are removed and
# reinstalled below — presumably to clear a conflicting install; confirm
# before reordering.
sudo pip uninstall -y apiclient
sudo pip uninstall -y google-api-python-client
sudo pip install google-api-python-client
sudo pip install gdcmdtools
# Needed for add-apt-repository on older Ubuntu releases.
sudo apt-get install python-software-properties
sudo add-apt-repository -y ppa:ubuntuhandbook1/apps
sudo apt-get update
sudo apt-get install mupdf-tools
# pdftk from the maintainer PPA (dropped from some Ubuntu releases).
sudo add-apt-repository ppa:malteworld/ppa -y
sudo apt update
sudo apt install pdftk -y
|
tshrinivasan/OCR4wikisource
|
setup.sh
|
Shell
|
gpl-2.0
| 648 |
#!/bin/sh
# Build the live image's initramfs with genkernel and install it as
# $BASEPATH/boot/initramfs.
set -e
# Pulls in BASEPATH and GENNAME.
. ./paths
# Quote every $BASEPATH expansion so paths with spaces cannot word-split.
cd "$BASEPATH"
# FYI, genkernel will mknod a few devices, and officially in an nspawn container you cant do that (generally for more than like zero and null),
# but you actually can go to the devices cgroup sysfs thing for the container in the host and enable mknod-ing any device with echo 'a *:* m' > devices.allow
rm -f boot/initramfs
genkernel --no-btrfs --no-zfs --disklabel --compress-initramfs-type=gzip --install --bootdir="$BASEPATH/boot/" --module-prefix="$BASEPATH/$GENNAME/" --kerneldir="$BASEPATH/kernel" initramfs
cd boot
# genkernel names the file initramfs-<version>; normalize to 'initramfs'.
mv initramfs* initramfs
|
urjaman/makelive
|
initramfs.sh
|
Shell
|
gpl-2.0
| 580 |
# Run the ray tracer under cuda-memcheck and capture the leak report.
# -f: do not error when output.txt does not exist yet (the bare rm did).
rm -f output.txt
cuda-memcheck --leak-check full RayTracer > output.txt
|
tompitkin/RayTracer
|
memcheck.sh
|
Shell
|
gpl-2.0
| 70 |
#!/bin/sh
# Create the project's virtualenv (if missing) and print usage reminders.
if [ ! -d venv ]
then
virtualenv venv
# NOTE(review): the default virtualenv above is immediately rebuilt with
# python3.4 — possibly a deliberate fallback in case python3.4 is absent;
# confirm before removing either call.
virtualenv -p /usr/bin/python3.4 venv
fi
echo "Reminder :"
echo "Activation source venv/bin/activate"
echo "Deactivation deactivate"
echo "Running test python -m unittest discover ."
|
eplanet/diffbuilder
|
setup.sh
|
Shell
|
gpl-2.0
| 246 |
# export AWS_ACCESS_KEY="Your-Access-Key"
# export AWS_SECRET_KEY="Your-Secret-Key"
# Reboot every EC2 instance tagged reboot-time=19-00 and bash-profile=wd,
# logging each attempt.
today=$(date +"%d-%m-%Y","%T")
logfile="/awslog/automation-instances.log"
# Make sure the scratch directory exists before redirecting into it.
mkdir -p ~/tmp
# Grab all Instance IDs for REBOOT action and export the IDs to a text file.
# The --filters/--query values are quoted so the shell cannot glob-expand
# the [*] and [InstanceId] bracket patterns.
sudo aws ec2 describe-instances --filters "Name=tag:reboot-time,Values=19-00" "Name=tag:bash-profile,Values=wd" --query "Reservations[*].Instances[*].[InstanceId]" --output text > ~/tmp/reboot_wd_instance_info.txt 2>&1
# Reboot each listed instance and record the result.
while IFS= read -r instance_id; do
    # Skip blank lines defensively.
    [ -n "$instance_id" ] || continue
    rebootresult=$(sudo aws ec2 reboot-instances --instance-ids "$instance_id")
    # Put info into log file (typo "Atempt" fixed).
    echo "Attempt to reboot $today instances by AWS CLI with result $rebootresult --EMPTY FOR NO RESULT--" >> "$logfile"
done < ~/tmp/reboot_wd_instance_info.txt
|
STARTSPACE/aws-ec2-start-stop-reboot-by-timetable
|
reboot/wd/reboot-wd-19.sh
|
Shell
|
gpl-2.0
| 785 |
#!/bin/bash
################################
#      Network Destroyer       #
#      By: Cameron Mayor       #
# Written to force oneself to  #
# practice for the RHCSA       #
################################
# restore the original config if the restore argument is passed to the script
if [ "$1" == "restore" ]; then
    cat backups/ifcfg-eth0 > /etc/sysconfig/network-scripts/ifcfg-eth0
    cat backups/resolv.conf > /etc/resolv.conf
    cat backups/network > /etc/sysconfig/network
    service NetworkManager start
    chkconfig NetworkManager on
    # Drop the immutable flag before the lock file can be removed.
    chattr -i /root/breaknetwork.lock
    rm -rf /root/breaknetwork.lock
    echo "Restored successfully!"
    exit
fi
# refuse to run twice (the unreachable stray 'exit' after this block was removed)
if [ -e /root/breaknetwork.lock ]; then
    echo "This script has already destroyed your network!"
    exit
fi
# stop and disable Network Manager
service NetworkManager stop
chkconfig NetworkManager off
# backup existing network config files
# BUG FIX: the original redirected stderr with '2>$1' — i.e. into a file
# named by the first script argument — instead of '2>&1'.
mkdir backups >/dev/null 2>&1
cp /etc/sysconfig/network backups/network
cp /etc/resolv.conf backups/resolv.conf
cp /etc/sysconfig/network-scripts/ifcfg-eth0 backups/ifcfg-eth0
# blow away existing network config files
>/etc/sysconfig/network
>/etc/resolv.conf
>/etc/sysconfig/network-scripts/ifcfg-eth0
# create an immutable lock file so a second run is detected above
touch /root/breaknetwork.lock
chattr +i /root/breaknetwork.lock
# Done
echo "Done destroying your network! Have fun fixing it!"
exit
|
wcmayor/redhat-scripts
|
breaknetwork.sh
|
Shell
|
gpl-2.0
| 1,346 |
# Name of the PHP example to mutate; uncomment one of the alternatives
# below to switch targets.
NAME=SortFunction
#NAME=Timeout
#NAME=Tokenizer
./modules/mutate_php.sh $NAME
|
cpantel/codeMutator
|
php.sh
|
Shell
|
gpl-2.0
| 79 |
#!/bin/bash
# Build an Arch Linux package of mp3splt: generate a PKGBUILD, point the
# compiler at the locally built libmp3splt, and run makepkg.
#we move in the current script directory
script_dir=$(readlink -f $0)
script_dir=${script_dir%\/*.sh}
PROGRAM_DIR=$script_dir/..
cd $PROGRAM_DIR
# Pulls in MP3SPLT_VERSION, LIBMP3SPLT_VERSION, MP3SPLT_DOC_FILES and the
# put_* helper functions.
. ./include_variables.sh "noflags" || exit 1
put_package "arch"
# makepkg expects i686, not i386.
if [[ $ARCH = "i386" ]];then
ARCH=i686
CFLAGS="-O2 -mtune=${ARCH} $CFLAGS"
fi
#if we don't have the distribution file
DIST_FILE="../mp3splt-${MP3SPLT_VERSION}-1_${ARCH}.pkg.tar.gz"
if [[ ! -f $DIST_FILE ]];then
#we generate the PKGBUILD file (the \$ escapes keep makepkg variables
#literal inside this double-quoted script-generating echo)
cd arch && echo "# \$Id: PKGBUILD \$
# Packager: Munteanu Alexandru Ionut <[email protected]>
pkgname=mp3splt
pkgver=${MP3SPLT_VERSION}
pkgrel=1
pkgdesc=\"Mp3splt is the command line program from the mp3splt-project, to split mp3 and ogg without decoding\"
arch=(i686 x86_64)
url=\"http://mp3splt.sourceforge.net\"
groups=root
depends=('libmp3splt=${LIBMP3SPLT_VERSION}')
source=(\$pkgname-\$pkgver.tar.gz)
build() {
cd \$startdir/src/\$pkgname-\$pkgver
./configure --prefix=/usr
make || return 1
mkdir -p \$startdir/pkg/usr/share/\$pkgname/doc
cp ${MP3SPLT_DOC_FILES[@]} \$startdir/pkg/usr/share/\$pkgname/doc/
make prefix=\$startdir/pkg/usr install
}" > PKGBUILD && cd ..
#we set the flags to find libmp3splt
export CFLAGS="-I${PROGRAM_DIR}/../libmp3splt/arch/pkg/usr/include $CFLAGS"
export LDFLAGS="-L${PROGRAM_DIR}/../libmp3splt/arch/pkg/usr/lib $LDFLAGS"
#we make the distribution file if we don't have it
if [[ ! -e ../mp3splt-${MP3SPLT_VERSION}.tar.gz ]];then
./make_source_package.sh || exit 1
fi &&\
cp ../mp3splt-${MP3SPLT_VERSION}.tar.gz ./arch &&\
cd arch && makepkg -d -c &&\
mv mp3splt-${MP3SPLT_VERSION}-1.pkg.tar.gz \
../../mp3splt-${MP3SPLT_VERSION}-1_${ARCH}.pkg.tar.gz &&\
rm -f ./mp3splt-${MP3SPLT_VERSION}.tar.gz && rm -f PKGBUILD \
|| exit 1
else
put_is_package_warning "We already have the $DIST_FILE distribution file !"
fi
|
markjeee/mp3splt
|
arch/make_arch_package.sh
|
Shell
|
gpl-2.0
| 1,936 |
#!/bin/bash
#
# This script is run by Codeship to send an SQS message to deploy the app to production.
# It expects CI_COMMIT_ID, CI_COMMITTER_USERNAME, SERVICE_NAME, GIT_REPO_URL,
# SQS_URL, NEWRELIC_LICENSE and the AWS credentials to be available in the env.
#
BASE_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" && pwd )
echo Deploying ${CI_COMMIT_ID}..
# send_deploy_message TASK_DEF_FILE SERVICE_NAME
# Reads the task-definition JSON, substitutes the <COMMIT_HASH>,
# <NEWRELIC_LICENSE> and <WEBCRON_TOKEN> placeholders, and queues the
# message on SQS with commit metadata attributes.  Extracted because the
# original repeated this body verbatim for each of the three task files.
send_deploy_message() {
  local task_def_file="$1"
  local service="$2"
  local message attributes
  message=$(<"${task_def_file}")
  message="${message//\<COMMIT_HASH\>/${CI_COMMIT_ID}}"
  message="${message//\<NEWRELIC_LICENSE\>/${NEWRELIC_LICENSE}}"
  message="${message/\<WEBCRON_TOKEN\>/${WEBCRON_TOKEN}}"
  attributes='{"service": { "StringValue": "<SERVICE>", "DataType": "String" }, "commit_url": { "StringValue": "https://github.com/uqlibrary/fez/commit/<COMMIT_HASH>", "DataType": "String" }, "committer": { "StringValue": "<COMMITTER>", "DataType": "String" } }'
  attributes="${attributes/\<SERVICE\>/${service}}"
  attributes="${attributes//\<COMMIT_HASH\>/${CI_COMMIT_ID}}"
  attributes="${attributes//\<COMMITTER\>/${CI_COMMITTER_USERNAME}}"
  aws sqs send-message \
    --queue-url ${SQS_URL} \
    --message-body "${message}" \
    --message-attributes "${attributes}"
}
send_deploy_message "${BASE_DIR}/.docker/staging/aws-task-definition.json" "fezstaging"
send_deploy_message "${BASE_DIR}/.docker/staging/aws-task-definition-bgp.json" "false"
send_deploy_message "${BASE_DIR}/.docker/staging/aws-task-definition-bgp-fg.json" "false"
|
uqlibrary/fez
|
scripts/backend-deployment-staging.sh
|
Shell
|
gpl-2.0
| 2,924 |
#!/bin/bash
#
# Simple deployment script for the WAR and static frontend code.
# Must be run from the disconf-web directory.
#
# Before running, make sure these environment variables are set:
# 1. $ONLINE_CONFIG_PATH : directory holding the production java web configuration
# 2. $WAR_ROOT_PATH : directory that will receive the java web war package
#
# After the script runs, the following layout is produced:
# $WAR_ROOT_PATH/ project root (point Tomcat here)
# $WAR_ROOT_PATH/disconf-web.war the generated War package
# $WAR_ROOT_PATH/html HTML frontend code (point Nginx here)
# $WAR_ROOT_PATH/META-INF
# $WAR_ROOT_PATH/WEB-INF Java Classes
#
#
# Online configuration path, e.g. ONLINE_CONFIG_PATH=/home/work/dsp/disconf-rd/online-resources
# must be preset in the environment
if [ "$ONLINE_CONFIG_PATH" = "" ]; then
echo "ONLINE_CONFIG_PATH is null, please set it in your env."
exit 1
fi
#
# WAR destination path, e.g. WAR_ROOT_PATH=/home/work/dsp/disconf-rd/war
# must be preset in the environment
if [ "$WAR_ROOT_PATH" = "" ]; then
# BUG FIX: this branch previously printed "ONLINE_CONFIG_PATH is null"
# (copy-paste error) even though it checks WAR_ROOT_PATH.
echo "WAR_ROOT_PATH is null, please set it in your env."
exit 1
fi
set -e
export PATH
echo "**********************************************"
echo "copy online config " $ONLINE_CONFIG_PATH
echo "**********************************************"
cp $ONLINE_CONFIG_PATH src/main -rp
echo "**********************************************"
echo "It's going to Generate the output for war"
echo "**********************************************"
current_path=`pwd`
#
# Build the WAR package
#
echo "**********************************************"
echo "It's going to got war package"
echo "**********************************************"
sh deploy/build_java.sh
#
# Build the frontend package
#
echo "**********************************************"
echo "It's going to got fe package"
echo "**********************************************"
cd html
python build.py
#
cd $current_path
#
# Recreate the target directory (only rm -rf when the path looks long
# enough to be a real deploy path, as a safety net).
#
mkdir -p $WAR_ROOT_PATH
if [ ${#WAR_ROOT_PATH} -gt 15 ]; then
echo "rm " $WAR_ROOT_PATH
rm -rf "$WAR_ROOT_PATH"
mkdir -p $WAR_ROOT_PATH
fi
#
#
#
echo "start to copy war"
cp output/disconf-web.war $WAR_ROOT_PATH -rp
#
#
#
echo "start to copy static"
mkdir $WAR_ROOT_PATH/html
cp html/output/* $WAR_ROOT_PATH/html -rp
#
# Unpack the WAR in place so Tomcat can serve the exploded directory.
#
cd $WAR_ROOT_PATH
echo "start to jar war"
jar xvf disconf-web.war
cd $current_path
echo "deploy done" $WAR_ROOT_PATH
|
ChainBoy/disconf
|
disconf-web/deploy/deploy.sh
|
Shell
|
gpl-2.0
| 2,345 |
#!/bin/bash
#
# Copyright (c) 2014 Igor Pecovnik, igor.pecovnik@gma**.com
#
# www.igorpecovnik.com / images + support
#
# Main branch
#
#--------------------------------------------------------------------------------------------------------------------------------
# currently there is no option to create an image without root
# you can compile a kernel but you can complete the whole process
# if you find a way, please submit code corrections. Thanks.
#--------------------------------------------------------------------------------------------------------------------------------
if [ "$UID" -ne 0 ]
then echo "Please run as root"
exit
fi
#--------------------------------------------------------------------------------------------------------------------------------
# Choose for which board you want to compile (whiptail menu unless $BOARD
# is already set in the environment)
#--------------------------------------------------------------------------------------------------------------------------------
if [ "$BOARD" == "" ]; then
BOARDS="Cubieboard A10 Cubieboard2 A20 Cubietruck A20 Lime A20 Lime2 A20 Micro A20 Bananapi A20 Cubox-i imx6";
MYLIST=`for x in $BOARDS; do echo $x ""; done`
whiptail --title "Choose a board" --backtitle "" --menu "\nWhich one?" 18 30 8 $MYLIST 2>results
BOARD=$(<results)
BOARD=${BOARD,,}
rm results
fi
# exit the script on cancel
if [ "$BOARD" == "" ]; then echo "ERROR: You have to choose one board"; exit; fi
#--------------------------------------------------------------------------------------------------------------------------------
# Choose for which distribution you want to compile
#--------------------------------------------------------------------------------------------------------------------------------
if [ "$RELEASE" == "" ]; then
RELEASE="wheezy Debian jessie Debian trusty Ubuntu";
MYLIST=`for x in $RELEASE; do echo $x ""; done`
whiptail --backtitle "" --title "Select distribution" --menu "" 12 30 4 $MYLIST 2>results
RELEASE=$(<results)
rm results
fi
# exit the script on cancel
if [ "$RELEASE" == "" ]; then echo "ERROR: You have to choose one distribution"; exit; fi
#--------------------------------------------------------------------------------------------------------------------------------
# Choose for which kernel branch you want to compile
#--------------------------------------------------------------------------------------------------------------------------------
if [ "$BRANCH" == "" ]; then
BRANCH="default 3.4.x next mainline";
MYLIST=`for x in $BRANCH; do echo $x ""; done`
whiptail --backtitle "" --title "Select distribution" --menu "" 12 30 4 $MYLIST 2>results
BRANCH=$(<results)
rm results
fi
# exit the script on cancel
if [ "$BRANCH" == "" ]; then echo "ERROR: You have to choose one branch"; exit; fi
#--------------------------------------------------------------------------------------------------------------------------------
# check which distro we are building (trusty is the only Ubuntu release here)
#--------------------------------------------------------------------------------------------------------------------------------
if [ "$RELEASE" == "trusty" ]; then
DISTRIBUTION="Ubuntu"
else
DISTRIBUTION="Debian"
fi
#--------------------------------------------------------------------------------------------------------------------------------
# Hostname defaults to the board name
#
HOST="$BOARD"
#--------------------------------------------------------------------------------------------------------------------------------
# The name of the job.  NOTE(review): $REVISION is not set anywhere in this
# script — presumably exported by the caller or a sourced lib; confirm.
#--------------------------------------------------------------------------------------------------------------------------------
VERSION="${BOARD^} $DISTRIBUTION $REVISION $RELEASE $BRANCH"
#--------------------------------------------------------------------------------------------------------------------------------
# Load libraries ($SRC must be exported by the caller)
#--------------------------------------------------------------------------------------------------------------------------------
source $SRC/lib/configuration.sh # Board configuration
source $SRC/lib/boards.sh # Board specific install
source $SRC/lib/common.sh # Functions
#--------------------------------------------------------------------------------------------------------------------------------
# let's start with fresh screen
#--------------------------------------------------------------------------------------------------------------------------------
clear
#--------------------------------------------------------------------------------------------------------------------------------
# optimize build time with 100% CPU usage (1.5x cores when USEALLCORES=yes)
#--------------------------------------------------------------------------------------------------------------------------------
CPUS=$(grep -c 'processor' /proc/cpuinfo)
if [ "$USEALLCORES" = "yes" ]; then
CTHREADS="-j$(($CPUS + $CPUS/2))";
else
CTHREADS="-j${CPUS}";
fi
#--------------------------------------------------------------------------------------------------------------------------------
# to display build time at the end
#--------------------------------------------------------------------------------------------------------------------------------
start=`date +%s`
#--------------------------------------------------------------------------------------------------------------------------------
# display what we are doing
#--------------------------------------------------------------------------------------------------------------------------------
echo "Building $VERSION."
#--------------------------------------------------------------------------------------------------------------------------------
# download packages for host
#--------------------------------------------------------------------------------------------------------------------------------
download_host_packages
clear
echo "Building $VERSION."
#--------------------------------------------------------------------------------------------------------------------------------
# fetch_from_github [repository, sub directory] — sources come from the
# variables set up by lib/configuration.sh above
#--------------------------------------------------------------------------------------------------------------------------------
mkdir -p $DEST/output
fetch_from_github "$BOOTLOADER" "$BOOTSOURCE"
fetch_from_github "$LINUXKERNEL" "$LINUXSOURCE"
if [[ -n "$DOCS" ]]; then fetch_from_github "$DOCS" "$DOCSDIR"; fi
if [[ -n "$MISC1" ]]; then fetch_from_github "$MISC1" "$MISC1_DIR"; fi
if [[ -n "$MISC2" ]]; then fetch_from_github "$MISC2" "$MISC2_DIR"; fi
if [[ -n "$MISC3" ]]; then fetch_from_github "$MISC3" "$MISC3_DIR"; fi
if [[ -n "$MISC4" ]]; then fetch_from_github "$MISC4" "$MISC4_DIR"; fi
#--------------------------------------------------------------------------------------------------------------------------------
# grab linux kernel version from Makefile (VERSION.PATCHLEVEL.SUBLEVEL
# plus EXTRAVERSION when it is non-empty)
#--------------------------------------------------------------------------------------------------------------------------------
VER=$(cat $DEST/$LINUXSOURCE/Makefile | grep VERSION | head -1 | awk '{print $(NF)}')
VER=$VER.$(cat $DEST/$LINUXSOURCE/Makefile | grep PATCHLEVEL | head -1 | awk '{print $(NF)}')
VER=$VER.$(cat $DEST/$LINUXSOURCE/Makefile | grep SUBLEVEL | head -1 | awk '{print $(NF)}')
EXTRAVERSION=$(cat $DEST/$LINUXSOURCE/Makefile | grep EXTRAVERSION | head -1 | awk '{print $(NF)}')
if [ "$EXTRAVERSION" != "=" ]; then VER=$VER$EXTRAVERSION; fi
#--------------------------------------------------------------------------------------------------------------------------------
# Compile source or choose already packed kernel
#--------------------------------------------------------------------------------------------------------------------------------
if [ "$SOURCE_COMPILE" = "yes" ]; then
# Patching sources
patching_sources
# Compile boot loader
compile_uboot
# compile kernel and create archives
compile_kernel
else
# Compile u-boot if not exits in cache
CHOOSEN_UBOOT="$BOARD"_"$BRANCH"_u-boot_"$VER".tgz
if [ ! -f "$DEST/output/u-boot/$CHOOSEN_UBOOT" ]; then
compile_uboot
fi
# choose kernel from ready made
choosing_kernel
fi
#--------------------------------------------------------------------------------------------------------------------------------
# create or use prepared root file-system
#--------------------------------------------------------------------------------------------------------------------------------
create_debian_template
mount_debian_template
#--------------------------------------------------------------------------------------------------------------------------------
# install board specific applications
#--------------------------------------------------------------------------------------------------------------------------------
install_board_specific
#--------------------------------------------------------------------------------------------------------------------------------
# add kernel to the image
#--------------------------------------------------------------------------------------------------------------------------------
install_kernel
#--------------------------------------------------------------------------------------------------------------------------------
# install external applications
#--------------------------------------------------------------------------------------------------------------------------------
if [ "$EXTERNAL" = "yes" ]; then
install_external_applications
fi
#--------------------------------------------------------------------------------------------------------------------------------
# add some summary to the image
#--------------------------------------------------------------------------------------------------------------------------------
fingerprint_image "$DEST/output/sdcard/root/readme.txt"
#--------------------------------------------------------------------------------------------------------------------------------
# closing image
#--------------------------------------------------------------------------------------------------------------------------------
closing_image
end=`date +%s`
runtime=$(((end-start)/60))
echo "Runtime $runtime min."
|
RaymiiOrg/olimex-scripts-lib
|
main.sh
|
Shell
|
gpl-2.0
| 10,169 |
# Expose the project root (one level above this helper's directory) on $PATH
# and as $ROOTDIR for the test scripts that source this file.
# Fix: "$0" is quoted inside the command substitution so paths containing
# whitespace do not word-split or glob.
export PATH="$(dirname "$0")/..:$PATH"
export ROOTDIR="$(dirname "$0")/.."
load_shunit2() {
    # Source the shunit2 test framework: prefer the distro-installed copy,
    # otherwise fall back to a 'shunit2' file reachable via $PATH.
    if [ ! -e /usr/share/shunit2/shunit2 ]; then
        . shunit2
    else
        . /usr/share/shunit2/shunit2
    fi
}
|
UnBsafeC/safec
|
tests/helper/test_helper.sh
|
Shell
|
gpl-2.0
| 196 |
#!/bin/bash
# Script to generate binaries for Linux and upload them to the build host.
# MACHOST holds the URL of the macintosh computer hosting the virtual machine.
# The only argument for the script is the release number
MACHOST=samson
export PATH=/opt/ocaml-3.10.2/bin:$PATH
LINUX_DIRECTORY=linux
# Fix: create the output directory through the variable and tolerate it
# already existing (-p); the original bare 'mkdir linux' failed on reruns.
mkdir -p "$LINUX_DIRECTORY"
rm -f "$LINUX_DIRECTORY"/*
# Build the static HTML-interface binary first.
if ! ./configure --enable-interface=html CFLAGS="-static -static-libgcc -O3 -msse3";then
    echo "Failure in html interface configuration"
    exit 1
fi
if ! make clean; then
    echo "Could not clean!"
    exit 1
fi
if ! make poy.native; then
    echo "Failure in make step"
    exit 1
fi
cp ./src/_build/poy.native ./"$LINUX_DIRECTORY"/seq_poy.command
# Now we make the ncurses interface
if ! ./configure --enable-interface=ncurses CFLAGS="-static -static-libgcc -msse3 -O3"; then
    echo "Failure in ncurses interface configuration"
    exit 1
fi
if ! make clean; then
    echo "Could not clean!"
    exit 1
fi
if ! make poy.native; then
    echo "Failure in make step"
    exit 1
fi
# NOTE(review): the HTML build copies from ./src/_build/ while this one copies
# from ./src/ -- confirm which location the build system actually produces.
cp ./src/poy.native ./"$LINUX_DIRECTORY"/ncurses_poy
# Wrapper that launches the ncurses binary inside an xterm.
cat > ./${LINUX_DIRECTORY}/ncurses_poy.command <<EOF
#!/bin/bash
xterm -e ../Resources/ncurses_poy
EOF
chmod a+x ./${LINUX_DIRECTORY}/ncurses_poy.command
# Upload the finished directory to the distribution host.
if ! scp -Cr ./${LINUX_DIRECTORY} ${MACHOST}:poy_distro/source_code/binaries/; then
    echo "Failed copy step"
    exit 1
fi
|
amnh/poy4
|
linux_builder.sh
|
Shell
|
gpl-2.0
| 1,311 |
#!/bin/bash
### BEGIN INIT INFO
#
# Provides: fence_station.py
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: fence weather station initscript
#
### END INIT INFO
## Fill in name of program here.
PROG="fence_station.py"
# Directory containing $PROG; start() cd's here before launching it.
PROG_PATH="/home/pi/py_scripts/wx_station"
PROG_ARGS=""
# NOTE(review): this file is used only as an is-running marker; it never holds
# an actual PID (stop() uses killall instead) -- confirm before relying on it.
PIDFILE="/var/run/fence_station.pid"
start() {
    # Launch $PROG in the background, record its PID, and refuse to start a
    # second instance while the PID file exists.
    if [ -e $PIDFILE ]; then
        ## Program is running, exit with error.
        echo "Error! $PROG is currently running!" 1>&2
        exit 1
    else
        ## Change from /dev/null to something like /var/log/$PROG if you want to save output.
        # Fix: abort if the program directory is missing instead of launching
        # $PROG from the wrong working directory.
        cd "$PROG_PATH" || exit 1
        # Fix: redirect stdout first, then dup stderr onto it, so BOTH streams
        # go to /dev/null (the original '2>&1 >/dev/null' left stderr on the tty).
        ./$PROG $PROG_ARGS >/dev/null 2>&1 &
        echo "$PROG started"
        # Fix: store the background PID rather than creating an empty file.
        echo $! > "$PIDFILE"
    fi
}
stop() {
    # Terminate a running instance of $PROG and remove its PID-file marker.
    if [ ! -e $PIDFILE ]; then
        ## No PID file: nothing to stop, report the error and bail out.
        echo "Error! $PROG not started!" 1>&2
        exit 1
    fi
    ## Program is running, so stop it
    echo "$PROG is running"
    killall $PROG
    rm -f $PIDFILE
    echo "$PROG stopped"
}
## Check to see if we are running as root first.
## Found at http://www.cyberciti.biz/tips/shell-root-user-check-script.html
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Standard SysV-style action dispatch; start/stop are defined above.
case "$1" in
start)
start
exit 0
;;
stop)
stop
exit 0
;;
reload|restart|force-reload)
# Restart = stop followed by start.
stop
start
exit 0
;;
**)
# NOTE(review): '**' in a case pattern behaves exactly like '*' here;
# this is the catch-all/usage branch.
echo "Usage: $0 {start|stop|reload}" 1>&2
exit 1
;;
esac
exit 0
|
IslePilot/wx_station
|
fencestation.sh
|
Shell
|
gpl-2.0
| 1,678 |
#!/bin/sh
# Prepare OpenOCD configuration (LPC2148 target, Olimex ARM-USB-OCD or -H
# adapter) under $PAPARAZZI_HOME/var, then (re)start openocd with it.
[ "$PAPARAZZI_HOME" = "" ] && printf "$0: failed! PAPARAZZI_HOME not set\n\n" && exit 1
PATH=/usr/bin:/bin
cd $PAPARAZZI_HOME
# Configuration sources
OCDSCRIPTS=/usr/share/openocd/scripts
# Configuration destination relative to PAPARAZZI_HOME
OCDCONFDIR=var
inline_edit()
{
	# Comment out the "12 MHz" warning echoed by the stock OpenOCD config
	# file given as $1 -- this hardware really does run at 12 MHz, so the
	# warning is pure noise.  '&' in the replacement re-inserts the match.
	sed -i -e 's/ echo/ # &/' "$1"
}
# Olimex ARM-USB-OCD config
# Concatenate the interface + target fragments into one config, once.
# (-s: only rebuild when the file is missing or empty.)
CONFIG=$OCDCONFDIR/openocd-arm-usb-ocd.cfg
if [ ! -s $CONFIG ] ; then
mkdir -p $OCDCONFDIR
cat \
$OCDSCRIPTS/interface/olimex-arm-usb-ocd.cfg \
$OCDSCRIPTS/target/lpc2148.cfg > $CONFIG
inline_edit $CONFIG
fi
# Olimex ARM-USB-OCD-H config
CONFIG_H=$OCDCONFDIR/openocd-arm-usb-ocd-h.cfg
if [ ! -s $CONFIG_H ] ; then
mkdir -p $OCDCONFDIR
cat \
$OCDSCRIPTS/interface/olimex-arm-usb-ocd-h.cfg \
$OCDSCRIPTS/target/lpc2148.cfg > $CONFIG_H
inline_edit $CONFIG_H
fi
# Symlink to User defined configuration
# Only ask once: if the symlink already exists, keep the user's choice.
OPENOCD_CFG=$OCDCONFDIR/openocd.cfg
if [ ! -h $OPENOCD_CFG ] ; then
printf "Which version ARM-USB-OCD or ARM-USB-OCD-H as default ?\n"
printf "<return> | [hH]"
read a
if [ "$a" = "h" ] || [ "$a" = "H" ] ; then
ln -s $( basename $CONFIG_H ) $OPENOCD_CFG
else
ln -s $( basename $CONFIG ) $OPENOCD_CFG
fi
fi
# Stop all running openocd
killall openocd >/dev/null 2>&1
sleep 0.5
# Start new OPENOCD
# NOTE(review): 'exec' combined with a trailing '&' starts openocd in a
# backgrounded subshell -- the exec does not replace this script; confirm
# whether the '&' is intentional.
exec openocd -f $OPENOCD_CFG "$@" > openocd.log 2>&1 &
|
elemhsb/mallorca
|
conf/system/HB/bin/openocd.sh
|
Shell
|
gpl-2.0
| 1,406 |
# Flash the Release build of the nitrat-meter firmware: part m32 (ATmega32)
# via a USBasp programmer; -u disables the safemode fuse prompt and the
# trailing ':i' declares the file format as Intel HEX.
avrdude -pm32 -cusbasp -u -U flash:w:nitrat-meter/Release/nitrat-meter.hex:i
|
trol73/avr-nitrat-meter
|
write_firmware.sh
|
Shell
|
gpl-2.0
| 77 |
#!/bin/bash
#FFMPEG installation script
# Copyright (C) 2007-2014 Sherin.co.in. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

# Downloads, builds and installs fdk-aac 0.1.3 into $INSTALL_DDIR so a later
# ffmpeg build can link against it.
RED='\033[01;31m'
RESET='\033[0m'
INSTALL_SDIR='/usr/src/ffmpegscript'
SOURCE_URL='http://mirror.ffmpeginstaller.com/source/fdkaac'
INSTALL_DDIR='/usr/local/cpffmpeg'
# CPU count, used to parallelize make.
export cpu=`cat "/proc/cpuinfo" | grep "processor"|wc -l`
export TMPDIR=$HOME/tmp
_package='fdk-aac-0.1.3.tar.gz'
clear
sleep 2
echo -e $RED"Installation of $_package ....... started"$RESET
# Robustness fixes: create the work directory if needed and abort on any
# failed step instead of blindly continuing with a broken source tree.
mkdir -p "$INSTALL_SDIR"
cd "$INSTALL_SDIR"/ || exit 1
#rm -rf fdk-aac*
wget "$SOURCE_URL/$_package" || exit 1
tar -xvzf "$_package" || exit 1
cd fdk-aac-0.1.3/ || exit 1
./configure --prefix="$INSTALL_DDIR" || exit 1
make -j"$cpu" || exit 1
make install || exit 1
echo -e $RED"Installation of $_package ....... Completed"$RESET
sleep 2
|
ravensnowbird/ffmpeg-ubuntu-14-04
|
fdkaac.sh
|
Shell
|
gpl-2.0
| 1,411 |
#!/bin/sh
# Create a release tarball of the hibernate script in the parent directory,
# named after the VERSION= line embedded in hibernate.sh.  Uses "git archive"
# when run from a git checkout, otherwise "svn export" followed by tar.
set -eu
VERSION="$(sed -rne 's,^VERSION="(.+)",\1,p' hibernate.sh)"
PREFIX="hibernate-script-"
TARGETDIR="../$PREFIX$VERSION"
TARBALL="../$PREFIX$VERSION.tar.gz"
# Refuse to clobber an existing tarball.
if [ -f "$TARBALL" ]; then
    echo "E: $TARBALL already exists." >&2
    exit 1
fi
if [ -d .git ] && [ -f .git/config ]; then
    # Fix: quote the expansions so an unusual $VERSION cannot word-split
    # or glob (the script otherwise runs under set -eu but unquoted here).
    git archive --prefix="$PREFIX$VERSION/" HEAD | gzip -9 > "$TARBALL"
    exit 0
fi
if [ -d "$TARGETDIR" ]; then
    echo "E: $TARGETDIR already exists." >&2
    exit 1
fi
svn export --quiet . "$TARGETDIR"
tar -czC .. -f "$TARBALL" "${TARGETDIR#../}"
echo "tarball created: $TARBALL" >&2
rm -r "$TARGETDIR"
exit 0
|
NigelCunningham/Hibernate-Script
|
mktarball.sh
|
Shell
|
gpl-2.0
| 608 |
#!/bin/bash
# =====================================================================================================
# Copyright (C) steady.sh v1.2 2016 parsa alemi (@parsaalemi1)
# =====================================================================================================
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# this program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
# =======================================================================================================
# It depends on Tmux https://github.com/tmux/tmux which is BSD-licensed
# and Screen https://www.gnu.org/software/screen GNU-licensed.
# =======================================================================================================
# This script is intended to control the state of a telegram-cli telegram bot running in background.
# The idea is to get the bot fully operative all the time without any supervision by the user.
# It should be able to recover the telegram bot in any case telegram-cli crashes, freezes or whatever.
# This script works by sampling the context-switch counters of the telegram-cli process (from /proc) every $RELOADTIME seconds,
# so it can detect any kind of stall or interruption of the process and reload the bot.
# Some script variables
# OK/BAD count successful and failed liveness checks; the (NON)VOLUNTARY pairs
# hold the previous and current context-switch counters read from
# /proc/<pid>/task/<pid>/status; I counts consecutive read failures.
OK=0
BAD=0
NONVOLUNTARY=1
NONVOLUNTARYCHECK=0
VOLUNTARY=1
VOLUNTARYCHECK=0
I=1
BOT=golem # You can put here other bots. Also you can change it to run more than one bot in the same server.
RELOADTIME=2 # Time between checking cpu calls of the cli process. Set the value high if your bot does not receive lots of messages.
# Watchdog mode using tmux: launch the bot in a detached tmux session and
# poll its context-switch counters forever, restarting it when they stop
# changing (the process is considered hung or dead).
function tmux_mode {
sleep 0.5
clear
# Space invaders thanks to github.com/windelicato
# Build $f0..$f7 / $b0..$b7 as ANSI foreground/background color escapes.
f=3 b=4
for j in f b; do
for i in {0..7}; do
printf -v $j$i %b "\e[${!j}${i}m"
done
done
bld=$'\e[1m'
rst=$'\e[0m'
cat << EOF
$f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$f1█▀███████▀█ $f2▀▀███▀▀███▀▀ $f3▀█▀██▀█▀ $f4█▀███████▀█ $f5▀▀███▀▀███▀▀ $f6▀█▀██▀█▀$rst
$f1▀ ▀▄▄ ▄▄▀ ▀ $f2 ▀█▄ ▀▀ ▄█▀ $f3▀▄ ▄▀ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5 ▀█▄ ▀▀ ▄█▀ $f6▀▄ ▄▀$rst
EOF
echo -e " \e[100m Steady script \e[00;37;40m"
echo -e " \e[01;34m by parsa alemi \e[00;37;40m"
echo ""
cat << EOF
$bld$f1▄ ▀▄ ▄▀ ▄ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4▄ ▀▄ ▄▀ ▄ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$bld$f1█▄█▀███▀█▄█ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4█▄█▀███▀█▄█ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$bld$f1▀█████████▀ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4▀█████████▀ $f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀$rst
$bld$f1 ▄▀ ▀▄ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4 ▄▀ ▀▄ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄$rst
EOF
sleep 1.2
# Checking if the bot folder is in HOME
echo -e "$bld$f4 CHECKING INSTALLED BOT...$rst"
sleep 0.5
ls ../ | grep $BOT 2>/dev/null >/dev/null
if [ $? != 0 ]; then
echo -e "$f1 ERROR: BOT: $BOT NOT FOUND IN YOUR HOME DIRECTORY$rst"
sleep 4
exit 1
fi
echo -e "$f2 $BOT FOUND IN YOUR HOME DIRECTORY$rst"
sleep 0.5
echo ""
echo -e "\033[38;5;208m _ __ __ _ _ __ ___ __ _ \033[0;00m"
echo -e "\033[38;5;208m| '_ \ / _` | '__/ __|/ _` | \033[0;00m"
echo -e "\033[38;5;208m| |_) | (_| | | \__ \ (_| | \033[0;00m"
echo -e "\033[38;5;208m| .__/ \__,_|_| |___/\__,_| \033[0;00m"
echo -e "\033[38;5;208m|_| \033[0;00m"
sleep 1.5
echo -e "$bld$f4 CHECKING PROCESSES...$rst"
sleep 0.7
# Looks for the number of screen/telegram-cli processes
CLINUM=`ps -e | grep -c telegram-cli`
echo "$f2 RUNNING $CLINUM TELEGRAM-CLI PROCESS$rst"
sleep 0.9
# =====Setup ends===== #
# Opening new tmux in a daemon
echo -e "$bld$f4 ATTACHING TMUX AS DAEMON...$rst"
# It is recommended to clear cli status always before starting the bot
rm ../.telegram-cli/state 2>/dev/null
# Nested TMUX sessions trick
# (TMUX= unsets the env var so tmux allows creating a session from inside tmux)
TMUX= tmux new-session -d -s $BOT "./launch.sh"
sleep 1.3
# First telegram-cli PID found in the process list.
CLIPID=`ps -e | grep telegram-cli | head -1 | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
echo -e "$f2 NEW TELEGRAM-CLI PROCESS: $CLIPID$rst"
echo ""
echo ""
# Locating telegram-cli status
# Baseline context-switch counter; the main loop compares against it.
cat /proc/$CLIPID/task/$CLIPID/status > STATUS
NONVOLUNTARY=`grep nonvoluntary STATUS | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
sleep 3
# :::::::::::::::::::::::::
# ::::::: MAIN LOOP :::::::
# :::::::::::::::::::::::::
# Every $RELOADTIME seconds: re-read the counters; if neither voluntary nor
# nonvoluntary context switches changed, the process is assumed hung and the
# tmux session is recreated.
while true; do
echo -e "$f2 TIMES CHECKED AND RUNNING:$f5 $OK $rst"
echo -e "$f2 TIMES FAILED AND RECOVERED:$f5 $BAD $rst"
echo ""
cat /proc/$CLIPID/task/$CLIPID/status > CHECK
if [ $? != 0 ]; then
# /proc entry unreadable (process gone); after 3 consecutive failures
# force a full restart by zeroing the counters.
I=$(( $I + 1 ))
if [ $I -ge 3 ]; then
kill $CLIPID
tmux kill-session -t $BOT
rm ../.telegram-cli/state 2>/dev/null
NONVOLUNTARY=0
NONVOLUNTARYCHECK=0
VOLUNTARY=0
VOLUNTARYCHECK=0
fi
else
I=1
fi
VOLUNTARYCHECK=`grep voluntary CHECK | head -1 | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
NONVOLUNTARYCHECK=`grep nonvoluntary CHECK | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
if [ $NONVOLUNTARY != $NONVOLUNTARYCHECK ] || [ $VOLUNTARY != $VOLUNTARYCHECK ]; then
echo -e "$f5 BOT RUNNING!$rst"
OK=$(( $OK + 1 ))
else
# Counters frozen: kill and relaunch the bot session.
echo -e "$f5 BOT NOT RUNING, TRYING TO RELOAD IT...$rst"
BAD=$(( $BAD + 1 ))
sleep 1
rm ../.telegram-cli/state 2>/dev/null
kill $CLIPID
tmux kill-session -t $BOT
TMUX= tmux new-session -d -s $BOT "./launch.sh"
sleep 1
CLIPID=`ps -e | grep telegram-cli | head -1 | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
if [ -z "${CLIPID}" ]; then
echo -e "$f1 ERROR: TELEGRAM-CLI PROCESS NOT RUNNING$rst"
echo -e "$f1 FAILED TO RECOVER BOT$rst"
sleep 3
exit 1
fi
fi
# Clear cache after 10h
# (2400 checks * RELOADTIME=2s loop + sleeps is roughly 10 hours)
if [ "$OK" == 2400 ]; then
sync
sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches'
fi
VOLUNTARY=`echo $VOLUNTARYCHECK`
NONVOLUNTARY=`echo $NONVOLUNTARYCHECK`
sleep $RELOADTIME
rm CHECK
done
}
# Watchdog mode using GNU screen: same counter-polling strategy as tmux_mode,
# but the bot runs inside a detached screen session; the primary screen PID is
# distinguished from up to two pre-existing ones ($SCREENPID1/$SCREENPID2).
function screen_mode {
clear
sleep 0.5
# Space invaders thanks to github.com/windelicato
f=3 b=4
for j in f b; do
for i in {0..7}; do
printf -v $j$i %b "\e[${!j}${i}m"
done
done
bld=$'\e[1m'
rst=$'\e[0m'
cat << EOF
$f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$f1█▀███████▀█ $f2▀▀███▀▀███▀▀ $f3▀█▀██▀█▀ $f4█▀███████▀█ $f5▀▀███▀▀███▀▀ $f6▀█▀██▀█▀$rst
$f1▀ ▀▄▄ ▄▄▀ ▀ $f2 ▀█▄ ▀▀ ▄█▀ $f3▀▄ ▄▀ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5 ▀█▄ ▀▀ ▄█▀ $f6▀▄ ▄▀$rst
EOF
echo -e " \e[100m Steady script \e[00;37;40m"
echo -e " \e[01;34m by parsa alemi \e[00;37;40m"
echo ""
cat << EOF
$bld$f1▄ ▀▄ ▄▀ ▄ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4▄ ▀▄ ▄▀ ▄ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
$bld$f1█▄█▀███▀█▄█ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4█▄█▀███▀█▄█ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
$bld$f1▀█████████▀ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4▀█████████▀ $f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀$rst
$bld$f1 ▄▀ ▀▄ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4 ▄▀ ▀▄ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄$rst
EOF
sleep 1.3
# Checking if the bot folder is in HOME
echo -e "$bld$f4 CHECKING INSTALLED BOT...$rst"
sleep 0.5
ls ../ | grep $BOT 2>/dev/null >/dev/null
if [ $? != 0 ]; then
echo -e "$f1 ERROR: BOT: $BOT NOT FOUND IN YOUR HOME DIRECTORY$rst"
sleep 4
exit 1
fi
echo -e "$f2 $BOT FOUND IN YOUR HOME DIRECTORY$rst"
sleep 0.5
echo ""
echo -e "\033[38;5;208m _ __ __ _ _ __ ___ __ _ \033[0;00m"
echo -e "\033[38;5;208m| '_ \ / _` | '__/ __|/ _` | \033[0;00m"
echo -e "\033[38;5;208m| |_) | (_| | | \__ \ (_| | \033[0;00m"
echo -e "\033[38;5;208m| .__/ \__,_|_| |___/\__,_| \033[0;00m"
echo -e "\033[38;5;208m|_| \033[0;00m"
# Starting preliminar setup
sleep 1.5
echo -e "$bld$f4 CHECKING PROCESSES...$rst"
sleep 0.7
# Looks for the number of screen/telegram-cli processes
SCREENNUM=`ps -e | grep -c screen`
CLINUM=`ps -e | grep -c telegram-cli`
if [ $SCREENNUM -ge 3 ]; then
# NOTE(review): the next echo uses single quotes, so $f1/$rst are printed
# literally instead of being expanded -- likely unintended; confirm.
echo -e "$f1 ERROR: MORE THAN 2 PROCESS OF SCREEN RUNNING.$rst"
echo -e "$f1 THESE PROCESSES HAVE BE KILLED. THEN RESTART THE SCRIPT$rst"
echo -e '$f1 RUN: "killall screen" $rst'
if [ $CLINUM -ge 2 ]; then
echo -e "$f1 ERROR: MORE THAN 1 PROCESS OF TELEGRAM-CLI RUNNING.$rst"
echo -e "$f1 THESE PROCESSES WILL BE KILLED. THEN RESTART THE SCRIPT$rst"
echo -e "$f1 RUN: killall telegram-cli $rst"
fi
sleep 4
exit 1
fi
echo "$f2 SCREEN NUMBER AND CLI NUMBER UNDER THE SUPPORTED LIMIT"
sleep 0.7
echo "$f2 RUNNING $SCREENNUM SCREEN PROCESS$rst"
echo "$f2 RUNNING $CLINUM TELEGRAM-CLI PROCESS$rst"
sleep 0.9
# Getting screen pid's
# The while body runs in a pipeline subshell, so the PIDs are passed back
# through the temporary files SC1/SC2 instead of shell variables.
ps -e | grep screen | sed 's/^[[:space:]]*//' | cut -f 1 -d" " | while read -r line ; do
sleep 0.5
echo -e "$f2 SCREEN NUMBER $I PID: $line$rst"
if [ $I -eq 1 ]; then
echo $line > SC1
else
echo $line > SC2
fi
I=$(( $I + 1 ))
done
# I had some weird errors, so I had to do this silly fix:
SCREENPID1=`cat SC1`
SCREENPID2=`cat SC2`
rm SC1 SC2 2>/dev/null
sleep 0.7
CLIPID=`ps -e | grep telegram-cli | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
if [ $CLINUM -eq 1 ]; then
# NOTE(review): $CLIPID1 is never assigned anywhere -- the PID captured
# above lives in $CLIPID, so this message prints empty and the kill below
# gets no argument.  Almost certainly a typo for $CLIPID; confirm.
echo -e "$f2 RUNNING ONE PROCESS OF TELEGRAM-CLI: $CLIPID1$rst"
echo -e "$bld$f4 KILLING TELEGRAM-CLI PROCESS. NOT NEEDED NOW$rst"
kill $CLIPID1
else
echo -e "$f2 RUNNING ZERO PROCESS OF TELEGRAM-CLI$rst"
fi
sleep 0.7
CLINUM=`ps -e | grep -c telegram-cli`
if [ $CLINUM -eq 1 ]; then
echo -e "$f1 ERROR: TELEGRAM-CLI PID COULDN'T BE KILLED. IGNORE.$rst"
fi
sleep 1
# =====Setup ends===== #
# Opening new screen in a daemon
echo -e "$bld$f4 ATTACHING SCREEN AS DAEMON...$rst"
# Better to clear cli status before
rm ../.telegram-cli/state 2>/dev/null
screen -d -m bash launch.sh
sleep 1.3
SCREENNUM=`ps -e | grep -c screen`
if [ $SCREENNUM != 3 ]; then
echo -e "$f1 ERROR: SCREEN RUNNING: $SCREENNUM \n SCREEN ESPECTED: 3$rst"
exit 1
fi
# Getting screen info
sleep 0.7
echo -e "$bld$f4 RELOADING SCREEN INFO...$rst"
sleep 1
echo -e "$f2 NUMBER OF SCREEN ATTACHED: $SCREENNUM$rst"
echo -e "$f2 SECONDARY SCREEN: $SCREENPID1 AND $SCREENPID2$rst"
# The primary screen is whichever screen PID is not one of the two
# pre-existing (secondary) ones.
SCREEN=`ps -e | grep -v $SCREENPID1 | grep -v $SCREENPID2 | grep screen | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
sleep 0.5
echo -e "$f2 PRIMARY SCREEN: $SCREEN$rst"
sleep 0.7
echo -e "$bld$f4 RELOADING TELEGRAM-CLI INFO...$rst"
sleep 0.7
# Getting new telegram-cli PID
CLIPID=`ps -e | grep telegram-cli | sed 's/^[[:space:]]*//' |cut -f 1 -d" "`
echo -e "$f2 NEW TELEGRAM-CLI PID: $CLIPID$rst"
if [ -z "${CLIPID}" ]; then
echo -e "$f1 ERROR: TELEGRAM-CLI PROCESS NOT RUNNING$rst"
sleep 3
exit 1
fi
# Locating telegram-cli status
# Baseline context-switch counter for the liveness check below.
cat /proc/$CLIPID/task/$CLIPID/status > STATUS
NONVOLUNTARY=`grep nonvoluntary STATUS | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
sleep 5
# :::::::::::::::::::::::::
# ::::::: MAIN LOOP :::::::
# :::::::::::::::::::::::::
# Same heuristic as tmux_mode: if the context-switch counters stop changing
# between two samples, kill the bot and its screen and relaunch both.
while true; do
echo -e "$f2 TIMES CHECKED AND RUNNING:$f5 $OK $rst"
echo -e "$f2 TIMES FAILED AND RECOVERED:$f5 $BAD $rst"
echo ""
cat /proc/$CLIPID/task/$CLIPID/status > CHECK
if [ $? != 0 ]; then
I=$(( $I + 1 ))
if [ $I -ge 3 ]; then
rm ../.telegram-cli/state 2>/dev/null
NONVOLUNTARY=0
NONVOLUNTARYCHECK=0
VOLUNTARY=0
VOLUNTARYCHECK=0
fi
else
I=1
fi
VOLUNTARYCHECK=`grep voluntary CHECK | head -1 | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
NONVOLUNTARYCHECK=`grep nonvoluntary CHECK | cut -f 2 -d":" | sed 's/^[[:space:]]*//'`
if [ $NONVOLUNTARY != $NONVOLUNTARYCHECK ] || [ $VOLUNTARY != $VOLUNTARYCHECK ]; then
echo -e "$f5 BOT RUNNING!$rst"
OK=$(( $OK + 1 ))
else
echo -e "$f5 BOT NOT RUNING, TRYING TO RELOAD IT...$rst"
BAD=$(( $BAD + 1 ))
sleep 1
rm ../.telegram-cli/state 2>/dev/null
kill $CLIPID
kill $SCREEN
screen -d -m bash launch.sh
sleep 1
CLIPID=`ps -e | grep telegram-cli | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
if [ -z "${CLIPID}" ]; then
echo -e "$f1 ERROR: TELEGRAM-CLI PROCESS NOT RUNNING$rst"
echo -e "$f1 FAILED TO RECOVER BOT$rst"
sleep 1
fi
SCREENNUM=`ps -e | grep -c screen`
if [ $SCREENNUM != 3 ]; then
echo -e "$f1 ERROR: SCREEN RUNNING: $SCREENNUM \n SCREEN ESPECTED: 3$rst"
echo -e "$f1 FAILED TO RECOVER BOT$rst"
exit 1
fi
SCREEN=`ps -e | grep -v $SCREENPID1 | grep -v $SCREENPID2 | grep screen | sed 's/^[[:space:]]*//' | cut -f 1 -d" "`
echo -e "$f5 BOT HAS BEEN SUCCESFULLY RELOADED!$rst"
echo -e "$f2 TELEGRAM-CLI NEW PID: $CLIPID$rst"
echo -e "$f2 SCREEN NEW PID: $SCREEN$rst"
sleep 3
fi
# Clear cache after 10h
if [ "$OK" == 2400 ]; then
sync
sudo sh -c 'echo 3 > /proc/sys/vm/drop_caches'
fi
VOLUNTARY=`echo $VOLUNTARYCHECK`
NONVOLUNTARY=`echo $NONVOLUNTARYCHECK`
sleep $RELOADTIME
rm CHECK
done
}
# Start the watchdog itself (steady.sh -t) inside a detached tmux session so
# the bot keeps running after this interactive shell exits.
function tmux_detached {
    clear
    # TMUX= unsets the env var so tmux allows creating a session from inside tmux.
    TMUX= tmux new-session -d -s script_detach "bash steady.sh -t"
    echo -e "\e[1m"
    echo -e ""
    echo "Bot running in the background with TMUX"
    echo ""
    echo -e "\e[0m"
    sleep 3
    # Fix: kill-session requires -t to name the target session; the original
    # "tmux kill-session script" was rejected by tmux as a bad argument.
    tmux kill-session -t script 2>/dev/null
    exit 1
}
# Launch the bot directly under a detached GNU screen session.
function screen_detached {
    clear
    screen -d -m bash launch.sh
    echo -e "\e[1m"
    echo -e ""
    echo "Bot running in the background with SCREEN"
    echo ""
    echo -e "\e[0m"
    sleep 3
    # Fix: removed the stray 'quit' -- it is not a shell builtin or defined
    # function, so it only produced a "command not found" error before exit.
    exit 1
}
# Bail out early when the script is invoked without any command-line options.
# printf emits byte-identical output to the original echo -e calls.
if [ "$#" -eq 0 ]; then
    printf '\e[1m\n'
    printf '\n'
    printf '%s\n' "Missing options!"
    printf '%s\n' "Run: bash steady.sh -h for help!"
    printf '\n'
    printf '\e[0m\n'
    sleep 1
    exit 1
fi
# Option dispatch: -t/-s run the watchdog in the foreground under tmux/screen,
# -T/-S start it detached, -i prints version info, -h prints usage.
# NOTE(review): every branch, including -h and -i, exits with status 1.
while getopts ":tsTSih" opt; do
case $opt in
t)
echo -e "\e[1m"
echo -e ""
echo "TMUX multiplexer option has been triggered." >&2
echo "Starting script..."
sleep 1.5
echo -e "\e[0m"
# tmux_mode loops forever (or exits itself), so this exit is a fallback.
tmux_mode
exit 1
;;
s)
echo -e "\e[1m"
echo -e ""
echo "SCREEN multiplexer option has been triggered." >&2
echo "Starting script..."
sleep 1.5
echo -e "\e[0m"
screen_mode
exit 1
;;
T)
echo -e "\e[1m"
echo -e ""
echo "TMUX multiplexer option has been triggered." >&2
echo "Starting script..."
sleep 1.5
echo -e "\e[0m"
tmux_detached
exit 1
;;
S)
echo -e "\e[1m"
echo -e ""
echo "SCREEN multiplexer option has been triggered." >&2
echo "Starting script..."
sleep 1.5
echo -e "\e[0m"
screen_detached
exit 1
;;
i)
# Version/credits banner.
echo -e "\e[1m"
echo -e ""
echo "steady.sh bash script v1.2 parsa alemi 2016 DBTeam" >&2
echo ""
echo -e "\e[0m"
echo -e "\033[38;5;208m _ __ __ _ _ __ ___ __ _ \033[0;00m"
echo -e "\033[38;5;208m| '_ \ / _` | '__/ __|/ _` | \033[0;00m"
echo -e "\033[38;5;208m| |_) | (_| | | \__ \ (_| | \033[0;00m"
echo -e "\033[38;5;208m| .__/ \__,_|_| |___/\__,_| \033[0;00m"
echo -e "\033[38;5;208m|_| \033[0;00m"
echo ""
exit 1
;;
h)
echo -e "\e[1m"
echo -e ""
echo "Usage:"
echo -e ""
echo "steady.sh -t"
echo "steady.sh -s"
echo "steady.sh -T"
echo "steady.sh -S"
echo "steady.sh -h"
echo "steady.sh -i"
echo ""
echo "Options:"
echo ""
echo " -t select TMUX terminal multiplexer"
echo " -s select SCREEN terminal multiplexer"
echo " -T select TMUX and detach session after start"
echo " -S select SCREEN and detach session after start"
echo " -h script options help page"
echo " -i information about the script"
echo -e "\e[0m"
exit 1
;;
\?)
# Unknown flag: getopts put the offending character in $OPTARG.
echo -e "\e[1m"
echo -e ""
echo "Invalid option: -$OPTARG" >&2
echo "Run bash $0 -h for help"
echo -e "\e[0m"
exit 1
;;
:)
# Declared option missing its required argument (none currently take one).
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
|
MrAL-i/SPIRAN
|
steady.sh
|
Shell
|
gpl-2.0
| 17,683 |
#!/bin/sh
# Copyright Abandoned 1996 TCX DataKonsult AB & Monty Program KB & Detron HB
# This file is public domain and comes with NO WARRANTY of any kind
#
# Script to start the MySQL daemon and restart it if it dies unexpectedly
#
# This should be executed in the MySQL base directory if you are using a
# binary installation that is not installed in its compile-time default
# location
#
# mysql.server works by first doing a cd to the base directory and from there
# executing mysqld_safe
# Initialize script globals
KILL_MYSQLD=1;
MYSQLD=
niceness=0
mysqld_ld_preload=
mysqld_ld_library_path=
# Initial logging status: error log is not open, and not using syslog
logging=init
want_syslog=0
syslog_tag=
# '@MYSQLD_USER@' is substituted at build time.
user='@MYSQLD_USER@'
pid_file=
err_log=
syslog_tag_mysqld=mysqld
syslog_tag_mysqld_safe=mysqld_safe
# Ignore HUP/INT/QUIT/TERM so the supervisor itself survives signals aimed
# at the session; the daemon is managed explicitly instead.
trap '' 1 2 3 15 # we shouldn't let anyone kill us
trap '' 13 # not even SIGPIPE
# MySQL-specific environment variable. First off, it's not really a umask,
# it's the desired mode. Second, it follows umask(2), not umask(3) in that
# octal needs to be explicit. Our shell might be a proper sh without printf,
# multiple-base arithmetic, and binary arithmetic, so this will get ugly.
# We reject decimal values to keep things at least half-sane.
umask 007 # fallback
UMASK="${UMASK-0640}"
# Strip any digit that is not 0/2/4/6, then validate: must be a leading '0'
# plus exactly three surviving digits; otherwise fall back to 0640.
fmode=`echo "$UMASK" | sed -e 's/[^0246]//g'`
octalp=`echo "$fmode"|cut -c1`
fmlen=`echo "$fmode"|wc -c|sed -e 's/ //g'`
if [ "x$octalp" != "x0" -o "x$UMASK" != "x$fmode" -o "x$fmlen" != "x5" ]
then
fmode=0640
echo "UMASK must be a 3-digit mode with an additional leading 0 to indicate octal." >&2
echo "The first digit will be corrected to 6, the others may be 0, 2, 4, or 6." >&2
fi
# Force the owner digit to 6 (rw for the mysql user).
fmode=`echo "$fmode"|cut -c3-4`
fmode="6$fmode"
if [ "x$UMASK" != "x0$fmode" ]
then
echo "UMASK corrected from $UMASK to 0$fmode ..."
fi
# A --no-defaults/--defaults-file/--defaults-extra-file option must come
# first; capture it so it can be forwarded to mysqld.
defaults=
case "$1" in
--no-defaults|--defaults-file=*|--defaults-extra-file=*)
defaults="$1"; shift
;;
esac
usage () {
# Print the option summary and terminate with a non-zero status.
cat <<EOF
Usage: $0 [OPTIONS]
--no-defaults Don't read the system defaults file
--defaults-file=FILE Use the specified defaults file
--defaults-extra-file=FILE Also use defaults from the specified file
--ledir=DIRECTORY Look for mysqld in the specified directory
--open-files-limit=LIMIT Limit the number of open files
--core-file-size=LIMIT Limit core files to the specified size
--timezone=TZ Set the system timezone
--malloc-lib=LIB Preload shared library LIB if available
--mysqld=FILE Use the specified file as mysqld
--mysqld-version=VERSION Use "mysqld-VERSION" as mysqld
--nice=NICE Set the scheduling priority of mysqld
--plugin-dir=DIR Plugins are under DIR or DIR/VERSION, if
VERSION is given
--skip-kill-mysqld Don't try to kill stray mysqld processes
--syslog Log messages to syslog with 'logger'
--skip-syslog Log messages to error log (default)
--syslog-tag=TAG Pass -t "mysqld-TAG" to 'logger'
All other options are passed to the mysqld program.
EOF
exit 1
}
my_which ()
{
  # Portable replacement for which(1): for every name given, print the full
  # path of its first match on $PATH.  Returns non-zero if any name is not
  # found.  $IFS is saved and restored around the colon-splitting.
  save_ifs="${IFS-UNSET}"
  IFS=:
  ret=0
  for file
  do
    for dir in $PATH
    do
      test -f "$dir/$file" || continue
      echo "$dir/$file"
      continue 2
    done
    # Fell through the inner loop: not found in any $PATH component.
    ret=1
    break
  done
  case "$save_ifs" in
    UNSET) unset IFS ;;
    *)     IFS="$save_ifs" ;;
  esac
  return $ret
}
log_generic () {
  # Emit a timestamped message to stdout and mirror it to the current log
  # sink.  $1 is the syslog priority; the rest is the message text.
  priority="$1"
  shift
  msg="`date +'%y%m%d %H:%M:%S'` mysqld_safe $*"
  echo "$msg"
  if [ "$logging" = init ]; then
    :  # during startup the message only goes to stdout
  elif [ "$logging" = file ]; then
    echo "$msg" >> "$err_log"
  elif [ "$logging" = syslog ]; then
    logger -t "$syslog_tag_mysqld_safe" -p "$priority" "$*"
  else
    echo "Internal program error (non-fatal):" \
        " unknown logging method '$logging'" >&2
  fi
}
# Log at syslog priority daemon.error; the line is redirected to stderr.
log_error () {
log_generic daemon.error "$@" >&2
}
# Log at syslog priority daemon.notice on stdout.
log_notice () {
log_generic daemon.notice "$@"
}
# Run the (already shell-quoted) mysqld command line given in $1, appending
# redirection that matches the active logging method, then eval it.
# NOTE(review): 'local' is not POSIX but is accepted by the shells this
# script targets.
eval_log_error () {
local cmd="$1"
case $logging in
file) cmd="$cmd >> "`shell_quote_string "$err_log"`" 2>&1" ;;
syslog)
# mysqld often prefixes its messages with a timestamp, which is
# redundant when logging to syslog (which adds its own timestamp)
# However, we don't strip the timestamp with sed here, because
# sed buffers output (only GNU sed supports a -u (unbuffered) option)
# which means that messages may not get sent to syslog until the
# mysqld process quits.
cmd="$cmd 2>&1 | logger -t '$syslog_tag_mysqld' -p daemon.error"
;;
*)
echo "Internal program error (non-fatal):" \
" unknown logging method '$logging'" >&2
;;
esac
#echo "Running mysqld: [$cmd]"
eval "$cmd"
}
shell_quote_string() {
  # Backslash-escape every character outside [a-zA-Z0-9/_.=-] so the
  # argument reaches the server exactly as given.
  # Fix: use printf instead of echo so arguments that look like echo
  # options (e.g. "-n") or contain backslashes are passed through intact.
  printf '%s\n' "$1" | sed -e 's,\([^a-zA-Z0-9/_.=-]\),\\\1,g'
}
# From a comma-separated list of cluster URLs, print the first one whose
# host:port answers a TCP probe (via nc).  Deprecated in favour of
# wsrep_cluster_address.
wsrep_pick_url() {
[ $# -eq 0 ] && return 0
log_error "WSREP: 'wsrep_urls' is DEPRECATED! Use wsrep_cluster_address to specify multiple addresses instead."
if ! which nc >/dev/null; then
log_error "ERROR: nc tool not found in PATH! Make sure you have it installed."
return 1
fi
local url
# Assuming URL in the form scheme://host:port
# If host and port are not NULL, the liveness of URL is assumed to be tested
# If port part is absent, the url is returned literally and unconditionally
# If every URL has port but none is reachable, nothing is returned
# The trailing '0' sentinel marks "nothing reachable".
for url in `echo $@ | sed s/,/\ /g` 0; do
local host=`echo $url | cut -d \: -f 2 | sed s/^\\\/\\\///`
local port=`echo $url | cut -d \: -f 3`
[ -z "$port" ] && break
nc -z "$host" $port >/dev/null && break
done
if [ "$url" == "0" ]; then
log_error "ERROR: none of the URLs in '$@' is reachable."
return 1
fi
echo $url
}
# Run mysqld with --wsrep-recover and parse recovered position from log.
# Position will be stored in wsrep_start_position_opt global.
wsrep_start_position_opt=""
wsrep_recover_position() {
# $@ is the full mysqld command line; a temporary error log under $DATADIR
# captures the "Recovered position:" line which is turned into a
# --wsrep_start_position option.
local mysqld_cmd="$@"
local euid=$(id -u)
local ret=0
local wr_logfile=$(mktemp $DATADIR/wsrep_recovery.XXXXXX)
# When running as root, hand the log over to the mysqld user.
[ "$euid" = "0" ] && chown $user $wr_logfile
chmod 600 $wr_logfile
# '@HOSTNAME@' is substituted at build time with the hostname command.
local wr_pidfile="$DATADIR/"`@HOSTNAME@`"-recover.pid"
local wr_options="--log_error='$wr_logfile' --pid-file='$wr_pidfile'"
log_notice "WSREP: Running position recovery with $wr_options"
eval_log_error "$mysqld_cmd --wsrep_recover $wr_options"
local rp="$(grep 'WSREP: Recovered position:' $wr_logfile)"
if [ -z "$rp" ]; then
# No position line: either recovery was skipped (fine) or it failed.
local skipped="$(grep WSREP $wr_logfile | grep 'skipping position recovery')"
if [ -z "$skipped" ]; then
log_error "WSREP: Failed to recover position: " `cat $wr_logfile`;
ret=1
else
log_notice "WSREP: Position recovery skipped"
fi
else
local start_pos="$(echo $rp | sed 's/.*WSREP\:\ Recovered\ position://' \
| sed 's/^[ \t]*//')"
log_notice "WSREP: Recovered position $start_pos"
wsrep_start_position_opt="--wsrep_start_position=$start_pos"
fi
rm $wr_logfile
return $ret
}
# Split options between mysqld_safe itself and mysqld.  With the literal
# first argument PICK-ARGS-FROM-ARGV, unrecognized options are collected
# (via append_arg_to_args, defined elsewhere) for forwarding to mysqld;
# otherwise (defaults-file mode) they are ignored.
parse_arguments() {
# We only need to pass arguments through to the server if we don't
# handle them here. So, we collect unrecognized options (passed on
# the command line) into the args variable.
pick_args=
if test "$1" = PICK-ARGS-FROM-ARGV
then
pick_args=1
shift
fi
for arg do
# the parameter after "=", or the whole $arg if no match
val=`echo "$arg" | sed -e 's;^--[^=]*=;;'`
# what's before "=", or the whole $arg if no match
optname=`echo "$arg" | sed -e 's/^\(--[^=]*\)=.*$/\1/'`
# replace "_" by "-" ; mysqld_safe must accept "_" like mysqld does.
optname_subst=`echo "$optname" | sed 's/_/-/g'`
arg=`echo $arg | sed "s/^$optname/$optname_subst/"`
case "$arg" in
# these get passed explicitly to mysqld
--basedir=*) MY_BASEDIR_VERSION="$val" ;;
--datadir=*) DATADIR="$val" ;;
--pid-file=*) pid_file="$val" ;;
--plugin-dir=*) PLUGIN_DIR="$val" ;;
--user=*) user="$val"; SET_USER=1 ;;
# these might have been set in a [mysqld_safe] section of my.cnf
# they are added to mysqld command line to override settings from my.cnf
--log-error=*) err_log="$val" ;;
--port=*) mysql_tcp_port="$val" ;;
--socket=*) mysql_unix_port="$val" ;;
# mysqld_safe-specific options - must be set in my.cnf ([mysqld_safe])!
--core-file-size=*) core_file_size="$val" ;;
--ledir=*) ledir="$val" ;;
--malloc-lib=*) set_malloc_lib "$val" ;;
# --mysqld/--mysqld-version are security-sensitive and therefore only
# honoured when given on the command line, never from a config file.
--mysqld=*)
if [ -z "$pick_args" ]; then
log_error "--mysqld option can only be used as command line option, found in config file"
exit 1
fi
MYSQLD="$val" ;;
--mysqld-version=*)
if [ -z "$pick_args" ]; then
log_error "--mysqld-version option can only be used as command line option, found in config file"
exit 1
fi
if test -n "$val"
then
MYSQLD="mysqld-$val"
PLUGIN_VARIANT="/$val"
else
MYSQLD="mysqld"
fi
;;
--nice=*) niceness="$val" ;;
--open-files-limit=*) open_files="$val" ;;
--open_files_limit=*) open_files="$val" ;;
--skip-kill-mysqld*) KILL_MYSQLD=0 ;;
--syslog) want_syslog=1 ;;
--skip-syslog) want_syslog=0 ;;
--syslog-tag=*) syslog_tag="$val" ;;
--timezone=*) TZ="$val"; export TZ; ;;
# Galera/wsrep options: accept both - and _ spelling.
--wsrep[-_]urls=*) wsrep_urls="$val"; ;;
--wsrep[-_]provider=*)
if test -n "$val" && test "$val" != "none"
then
wsrep_restart=1
fi
;;
--help) usage ;;
*)
if test -n "$pick_args"
then
append_arg_to_args "$arg"
fi
;;
esac
done
}
# Add a single shared library to the list of libraries which will be added to
# LD_PRELOAD for mysqld
#
# Since LD_PRELOAD is a space-separated value (for historical reasons), if a
# shared lib's path contains spaces, that path will be prepended to
# LD_LIBRARY_PATH and stripped from the lib value.
add_mysqld_ld_preload() {
lib_to_add="$1"
log_notice "Adding '$lib_to_add' to LD_PRELOAD for mysqld"
case "$lib_to_add" in
*' '*)
# Must strip path from lib, and add it to LD_LIBRARY_PATH
lib_file=`basename "$lib_to_add"`
case "$lib_file" in
*' '*)
# The lib file itself has a space in its name, and can't
# be used in LD_PRELOAD
log_error "library name '$lib_to_add' contains spaces and can not be used with LD_PRELOAD"
exit 1
;;
esac
lib_path=`dirname "$lib_to_add"`
lib_to_add="$lib_file"
# Append the directory to the colon-separated search path.
[ -n "$mysqld_ld_library_path" ] && mysqld_ld_library_path="$mysqld_ld_library_path:"
mysqld_ld_library_path="$mysqld_ld_library_path$lib_path"
;;
esac
# LD_PRELOAD is a space-separated
[ -n "$mysqld_ld_preload" ] && mysqld_ld_preload="$mysqld_ld_preload "
mysqld_ld_preload="${mysqld_ld_preload}$lib_to_add"
}
# Returns LD_PRELOAD (and LD_LIBRARY_PATH, if needed) text, quoted to be
# suitable for use in the eval that calls mysqld.
#
# All values in mysqld_ld_preload are prepended to LD_PRELOAD.
mysqld_ld_preload_text() {
text=
if [ -n "$mysqld_ld_preload" ]; then
# Prepend our libs to any LD_PRELOAD already present in the environment.
new_text="$mysqld_ld_preload"
[ -n "$LD_PRELOAD" ] && new_text="$new_text $LD_PRELOAD"
text="${text}LD_PRELOAD="`shell_quote_string "$new_text"`' '
fi
if [ -n "$mysqld_ld_library_path" ]; then
new_text="$mysqld_ld_library_path"
[ -n "$LD_LIBRARY_PATH" ] && new_text="$new_text:$LD_LIBRARY_PATH"
text="${text}LD_LIBRARY_PATH="`shell_quote_string "$new_text"`' '
fi
echo "$text"
}
# set_malloc_lib LIB
# - If LIB is empty, do nothing and return
# - If LIB is 'tcmalloc', look for tcmalloc shared library in $malloc_dirs.
# tcmalloc is part of the Google perftools project.
# - If LIB is an absolute path, assume it is a malloc shared library
#
# Put LIB in mysqld_ld_preload, which will be added to LD_PRELOAD when
# running mysqld. See ld.so for details.
set_malloc_lib() {
# This list is kept intentionally simple.
malloc_dirs="/usr/lib /usr/lib64 /usr/lib/i386-linux-gnu /usr/lib/x86_64-linux-gnu"
malloc_lib="$1"
if [ "$malloc_lib" = tcmalloc ]; then
# Probe the known flavors in preference order; first readable file wins.
malloc_lib=
for libdir in `echo $malloc_dirs`; do
for flavor in _minimal '' _and_profiler _debug; do
tmp="$libdir/libtcmalloc$flavor.so"
#log_notice "DEBUG: Checking for malloc lib '$tmp'"
[ -r "$tmp" ] || continue
malloc_lib="$tmp"
break 2
done
done
if [ -z "$malloc_lib" ]; then
log_error "no shared library for --malloc-lib=tcmalloc found in $malloc_dirs"
exit 1
fi
fi
# Allow --malloc-lib='' to override other settings
[ -z "$malloc_lib" ] && return
case "$malloc_lib" in
/*)
if [ ! -r "$malloc_lib" ]; then
log_error "--malloc-lib can not be read and will not be used"
exit 1
fi
# Restrict to a the list in $malloc_dirs above
case "`dirname "$malloc_lib"`" in
/usr/lib) ;;
/usr/lib64) ;;
/usr/lib/i386-linux-gnu) ;;
/usr/lib/x86_64-linux-gnu) ;;
*)
log_error "--malloc-lib must be located in one of the directories: $malloc_dirs"
exit 1
;;
esac
;;
*)
log_error "--malloc-lib must be an absolute path or 'tcmalloc'; " \
"ignoring value '$malloc_lib'"
exit 1
;;
esac
add_mysqld_ld_preload "$malloc_lib"
}
#
# First, try to find BASEDIR and ledir (where mysqld is)
#
if echo '@pkgdatadir@' | grep '^@prefix@' > /dev/null
then
relpkgdata=`echo '@pkgdatadir@' | sed -e 's,^@prefix@,,' -e 's,^/,,' -e 's,^,./,'`
else
# pkgdatadir is not relative to prefix
relpkgdata='@pkgdatadir@'
fi
MY_PWD=`pwd`
# Check for the directories we would expect from a binary release install
if test -n "$MY_BASEDIR_VERSION" -a -d "$MY_BASEDIR_VERSION"
then
# BASEDIR is already overridden on command line. Do not re-set.
# Use BASEDIR to discover le.
if test -x "$MY_BASEDIR_VERSION/libexec/mysqld"
then
ledir="$MY_BASEDIR_VERSION/libexec"
elif test -x "$MY_BASEDIR_VERSION/sbin/mysqld"
then
ledir="$MY_BASEDIR_VERSION/sbin"
else
ledir="$MY_BASEDIR_VERSION/bin"
fi
elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/bin/mysqld"
then
MY_BASEDIR_VERSION="$MY_PWD" # Where bin, share and data are
ledir="$MY_PWD/bin" # Where mysqld is
# Check for the directories we would expect from a source install
elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/libexec/mysqld"
then
MY_BASEDIR_VERSION="$MY_PWD" # Where libexec, share and var are
ledir="$MY_PWD/libexec" # Where mysqld is
elif test -f "$relpkgdata"/english/errmsg.sys -a -x "$MY_PWD/sbin/mysqld"
then
MY_BASEDIR_VERSION="$MY_PWD" # Where sbin, share and var are
ledir="$MY_PWD/sbin" # Where mysqld is
# Since we didn't find anything, use the compiled-in defaults
else
MY_BASEDIR_VERSION='@prefix@'
ledir='@libexecdir@'
fi
#
# Second, try to find the data directory
#
# Try where the binary installs put it
if test -d $MY_BASEDIR_VERSION/data/mysql
then
DATADIR=$MY_BASEDIR_VERSION/data
if test -z "$defaults" -a -r "$DATADIR/my.cnf"
then
defaults="--defaults-extra-file=$DATADIR/my.cnf"
fi
# Next try where the source installs put it
elif test -d $MY_BASEDIR_VERSION/var/mysql
then
DATADIR=$MY_BASEDIR_VERSION/var
# Or just give up and use our compiled-in default
else
DATADIR=@localstatedir@
fi
if test -z "$MYSQL_HOME"
then
if test -r "$MY_BASEDIR_VERSION/my.cnf" && test -r "$DATADIR/my.cnf"
then
log_error "WARNING: Found two instances of my.cnf -
$MY_BASEDIR_VERSION/my.cnf and
$DATADIR/my.cnf
IGNORING $DATADIR/my.cnf"
MYSQL_HOME=$MY_BASEDIR_VERSION
elif test -r "$DATADIR/my.cnf"
then
log_error "WARNING: Found $DATADIR/my.cnf
The data directory is a deprecated location for my.cnf, please move it to
$MY_BASEDIR_VERSION/my.cnf"
MYSQL_HOME=$DATADIR
else
MYSQL_HOME=$MY_BASEDIR_VERSION
fi
fi
export MYSQL_HOME
# Get first arguments from the my.cnf file, groups [mysqld] and [mysqld_safe]
# and then merge with the command line arguments
if test -x "$MY_BASEDIR_VERSION/bin/my_print_defaults"
then
print_defaults="$MY_BASEDIR_VERSION/bin/my_print_defaults"
elif test -x `dirname $0`/my_print_defaults
then
print_defaults="`dirname $0`/my_print_defaults"
elif test -x ./bin/my_print_defaults
then
print_defaults="./bin/my_print_defaults"
elif test -x @bindir@/my_print_defaults
then
print_defaults="@bindir@/my_print_defaults"
elif test -x @bindir@/mysql_print_defaults
then
print_defaults="@bindir@/mysql_print_defaults"
else
print_defaults="my_print_defaults"
fi
# Append one argument, shell-quoted, to the global $args string.
append_arg_to_args () {
  args="$args $(shell_quote_string "$1")"
}
args=
SET_USER=2
parse_arguments `$print_defaults $defaults --loose-verbose mysqld server`
if test $SET_USER -eq 2
then
SET_USER=0
fi
parse_arguments `$print_defaults $defaults --loose-verbose mysqld_safe safe_mysqld`
parse_arguments PICK-ARGS-FROM-ARGV "$@"
#
# Try to find the plugin directory
#
# Use user-supplied argument
if [ -n "${PLUGIN_DIR}" ]; then
plugin_dir="${PLUGIN_DIR}"
else
# Try to find plugin dir relative to basedir
for dir in lib64/mysql/plugin lib64/plugin lib/mysql/plugin lib/plugin
do
if [ -d "${MY_BASEDIR_VERSION}/${dir}" ]; then
plugin_dir="${MY_BASEDIR_VERSION}/${dir}"
break
fi
done
# Give up and use compiled-in default
if [ -z "${plugin_dir}" ]; then
plugin_dir='@pkgplugindir@'
fi
fi
plugin_dir="${plugin_dir}${PLUGIN_VARIANT}"
# Determine what logging facility to use
# Ensure that 'logger' exists, if it's requested
if [ $want_syslog -eq 1 ]
then
my_which logger > /dev/null 2>&1
if [ $? -ne 0 ]
then
log_error "--syslog requested, but no 'logger' program found. Please ensure that 'logger' is in your PATH, or do not specify the --syslog option to mysqld_safe."
exit 1
fi
fi
if [ -n "$err_log" -o $want_syslog -eq 0 ]
then
if [ -n "$err_log" ]
then
# mysqld adds ".err" if there is no extension on the --log-error
# argument; must match that here, or mysqld_safe will write to a
# different log file than mysqld
# mysqld does not add ".err" to "--log-error=foo."; it considers a
# trailing "." as an extension
if expr "$err_log" : '.*\.[^/]*$' > /dev/null
then
:
else
err_log="$err_log".err
fi
case "$err_log" in
/* ) ;;
* ) err_log="$DATADIR/$err_log" ;;
esac
else
err_log=$DATADIR/`@HOSTNAME@`.err
fi
append_arg_to_args "--log-error=$err_log"
if [ $want_syslog -eq 1 ]
then
# User explicitly asked for syslog, so warn that it isn't used
log_error "Can't log to error log and syslog at the same time. Remove all --log-error configuration options for --syslog to take effect."
fi
# Log to err_log file
log_notice "Logging to '$err_log'."
logging=file
if [ ! -f "$err_log" -a ! -h "$err_log" ]; then # if error log already exists,
touch "$err_log" # we just append. otherwise,
chmod "$fmode" "$err_log" # fix the permissions here!
fi
else
if [ -n "$syslog_tag" ]
then
# Sanitize the syslog tag
syslog_tag=`echo "$syslog_tag" | sed -e 's/[^a-zA-Z0-9_-]/_/g'`
syslog_tag_mysqld_safe="${syslog_tag_mysqld_safe}-$syslog_tag"
syslog_tag_mysqld="${syslog_tag_mysqld}-$syslog_tag"
fi
log_notice "Logging to syslog."
logging=syslog
fi
USER_OPTION=""
if test -w / -o "$USER" = "root"
then
if test "$user" != "root" -o $SET_USER = 1
then
USER_OPTION="--user=$user"
fi
# Change the err log to the right user, if it is in use
if [ $want_syslog -eq 0 -a ! -h "$err_log" ]; then
touch "$err_log"
chown $user "$err_log"
fi
if test -n "$open_files"
then
ulimit -n $open_files
fi
fi
if test -n "$open_files"
then
append_arg_to_args "--open-files-limit=$open_files"
fi
safe_mysql_unix_port=${mysql_unix_port:-${MYSQL_UNIX_PORT:-@MYSQL_UNIX_ADDR@}}
# Make sure that directory for $safe_mysql_unix_port exists
mysql_unix_port_dir=`dirname $safe_mysql_unix_port`
if [ ! -d $mysql_unix_port_dir ]
then
if [ ! -h $mysql_unix_port_dir ]; then
mkdir $mysql_unix_port_dir
chown $user $mysql_unix_port_dir
chmod 755 $mysql_unix_port_dir
fi
fi
# If the user doesn't specify a binary, we assume name "mysqld"
if test -z "$MYSQLD"
then
MYSQLD=mysqld
fi
if test ! -x "$ledir/$MYSQLD"
then
log_error "The file $ledir/$MYSQLD
does not exist or is not executable. Please cd to the mysql installation
directory and restart this script from there as follows:
./bin/mysqld_safe&
See http://dev.mysql.com/doc/mysql/en/mysqld-safe.html for more information"
exit 1
fi
if test -z "$pid_file"
then
pid_file="$DATADIR/`@HOSTNAME@`.pid"
else
case "$pid_file" in
/* ) ;;
* ) pid_file="$DATADIR/$pid_file" ;;
esac
fi
append_arg_to_args "--pid-file=$pid_file"
if test -n "$mysql_unix_port"
then
append_arg_to_args "--socket=$mysql_unix_port"
fi
if test -n "$mysql_tcp_port"
then
append_arg_to_args "--port=$mysql_tcp_port"
fi
if test $niceness -eq 0
then
NOHUP_NICENESS="nohup"
else
NOHUP_NICENESS="nohup nice -$niceness"
fi
# Using nice with no args to get the niceness level is GNU-specific.
# This check could be extended for other operating systems (e.g.,
# BSD could use "nohup sh -c 'ps -o nice -p $$' | tail -1").
# But, it also seems that GNU nohup is the only one which messes
# with the priority, so this is okay.
if nohup nice > /dev/null 2>&1
then
normal_niceness=`nice`
nohup_niceness=`nohup nice 2>/dev/null`
numeric_nice_values=1
for val in $normal_niceness $nohup_niceness
do
case "$val" in
-[0-9] | -[0-9][0-9] | -[0-9][0-9][0-9] | \
[0-9] | [0-9][0-9] | [0-9][0-9][0-9] )
;;
* )
numeric_nice_values=0 ;;
esac
done
if test $numeric_nice_values -eq 1
then
nice_value_diff=`expr $nohup_niceness - $normal_niceness`
if test $? -eq 0 && test $nice_value_diff -gt 0 && \
nice --$nice_value_diff echo testing > /dev/null 2>&1
then
# nohup increases the priority (bad), and we are permitted
# to lower the priority with respect to the value the user
# might have been given
niceness=`expr $niceness - $nice_value_diff`
NOHUP_NICENESS="nice -$niceness nohup"
fi
fi
else
if nohup echo testing > /dev/null 2>&1
then
:
else
# nohup doesn't work on this system
NOHUP_NICENESS=""
fi
fi
# Try to set the core file size (even if we aren't root) because many systems
# don't specify a hard limit on core file size.
if test -n "$core_file_size"
then
ulimit -c $core_file_size
fi
#
# If there exists an old pid file, check if the daemon is already running
# Note: The switches to 'ps' may depend on your operating system
if test -f "$pid_file"
then
PID=`cat "$pid_file"`
if @CHECK_PID@
then
if @FIND_PROC@
then # The pid contains a mysqld process
log_error "A mysqld process already exists"
exit 1
fi
fi
if [ ! -h "$pid_file" ]; then
rm -f "$pid_file"
fi
if test -f "$pid_file"
then
log_error "Fatal error: Can't remove the pid file:
$pid_file
Please remove it manually and start $0 again;
mysqld daemon not started"
exit 1
fi
fi
#
# Uncomment the following lines if you want all tables to be automatically
# checked and repaired during startup. You should add sensible key_buffer
# and sort_buffer values to my.cnf to improve check performance or require
# less disk space.
# Alternatively, you can start mysqld with the "myisam-recover" option. See
# the manual for details.
#
# echo "Checking tables in $DATADIR"
# $MY_BASEDIR_VERSION/bin/myisamchk --silent --force --fast --medium-check $DATADIR/*/*.MYI
# $MY_BASEDIR_VERSION/bin/isamchk --silent --force $DATADIR/*/*.ISM
# Does this work on all systems?
#if type ulimit | grep "shell builtin" > /dev/null
#then
# ulimit -n 256 > /dev/null 2>&1 # Fix for BSD and FreeBSD systems
#fi
cmd="`mysqld_ld_preload_text`$NOHUP_NICENESS"
for i in "$ledir/$MYSQLD" "$defaults" "--basedir=$MY_BASEDIR_VERSION" \
"--datadir=$DATADIR" "--plugin-dir=$plugin_dir" "$USER_OPTION"
do
cmd="$cmd "`shell_quote_string "$i"`
done
cmd="$cmd $args"
# Avoid 'nohup: ignoring input' warning
nohup_redir=""
test -n "$NOHUP_NICENESS" && nohup_redir=" < /dev/null"
log_notice "Starting $MYSQLD daemon with databases from $DATADIR"
# variable to track the current number of "fast" (a.k.a. subsecond) restarts
fast_restart=0
# maximum number of restarts before throttling kicks in
max_fast_restarts=5
# flag whether a usable sleep command exists
have_sleep=1
# maximum number of wsrep restarts
max_wsrep_restarts=0
while true
do
# Some extra safety
if [ ! -h "$safe_mysql_unix_port" ]; then
rm -f "$safe_mysql_unix_port"
fi
if [ ! -h "$pid_file" ]; then
rm -f "$pid_file"
fi
start_time=`date +%M%S`
# this sets wsrep_start_position_opt
wsrep_recover_position "$cmd"
[ $? -ne 0 ] && exit 1 #
[ -n "$wsrep_urls" ] && url=`wsrep_pick_url $wsrep_urls` # check connect address
if [ -z "$url" ]
then
eval_log_error "$cmd $wsrep_start_position_opt $nohup_redir"
else
eval_log_error "$cmd $wsrep_start_position_opt --wsrep_cluster_address=$url $nohup_redir"
fi
if [ $want_syslog -eq 0 -a ! -f "$err_log" -a ! -h "$err_log" ]; then
touch "$err_log" # hypothetical: log was renamed but not
chown $user "$err_log" # flushed yet. we'd recreate it with
chmod "$fmode" "$err_log" # wrong owner next time we log, so set
fi # it up correctly while we can!
end_time=`date +%M%S`
if test ! -f "$pid_file" # This is removed if normal shutdown
then
break
fi
# sanity check if time reading is sane and there's sleep
if test $end_time -gt 0 -a $have_sleep -gt 0
then
# throttle down the fast restarts
if test $end_time -eq $start_time
then
fast_restart=`expr $fast_restart + 1`
if test $fast_restart -ge $max_fast_restarts
then
log_notice "The server is respawning too fast. Sleeping for 1 second."
sleep 1
sleep_state=$?
if test $sleep_state -gt 0
then
log_notice "The server is respawning too fast and no working sleep command. Turning off trottling."
have_sleep=0
fi
fast_restart=0
fi
else
fast_restart=0
fi
fi
if @TARGET_LINUX@ && test $KILL_MYSQLD -eq 1
then
# Test if one process was hanging.
# This is only a fix for Linux (running as base 3 mysqld processes)
# but should work for the rest of the servers.
# The only thing is ps x => redhat 5 gives warnings when using ps -x.
# kill -9 is used or the process won't react on the kill.
numofproces=`ps xaww | grep -v "grep" | grep "$ledir/$MYSQLD\>" | grep -c "pid-file=$pid_file"`
log_notice "Number of processes running now: $numofproces"
I=1
while test "$I" -le "$numofproces"
do
PROC=`ps xaww | grep "$ledir/$MYSQLD\>" | grep -v "grep" | grep "pid-file=$pid_file" | sed -n '$p'`
for T in $PROC
do
break
done
# echo "TEST $I - $T **"
if kill -9 $T
then
log_error "$MYSQLD process hanging, pid $T - killed"
else
break
fi
I=`expr $I + 1`
done
fi
if [ -n "$wsrep_restart" ]
then
if [ $wsrep_restart -le $max_wsrep_restarts ]
then
wsrep_restart=`expr $wsrep_restart + 1`
log_notice "WSREP: sleeping 15 seconds before restart"
sleep 15
else
log_notice "WSREP: not restarting wsrep node automatically"
break
fi
fi
log_notice "mysqld restarted"
done
log_notice "mysqld from pid file $pid_file ended"
|
holser/mysql-wsrep
|
scripts/mysqld_safe.sh
|
Shell
|
gpl-2.0
| 28,342 |
#!/bin/bash
# Launcher loop for the Atari Lynx core: keep presenting the ROM
# picker and running mednafen on the selection until the user cancels.
source ./mednafen-common.sh
export SDL_OMAP_LAYER_SIZE=640x408
while true; do
  file=$(select_rom "lastdir-lynx.txt" "Atari Lynx")
  # An empty selection means the picker was cancelled -- stop looping.
  if [ -z "$file" ]; then
    exit
  fi
  ./mednafen "$file"
done
|
pder/mednafen-pandora
|
pandora/mednafen-lynx.sh
|
Shell
|
gpl-2.0
| 226 |
#!/bin/bash
######################################################################
#
# idFORGE Framework - Manage identity manuals in community
# Copyright © 2015 The CentOS Artwork SIG
#
# idFORGE Framework is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# idFORGE Framework is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with idFORGE Framework; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Alain Reguera Delgado <[email protected]>
# 39 Street No. 4426 Cienfuegos, Cuba.
#
######################################################################
# Overlay each configured base-composition image onto the file being
# rendered, in place, using ImageMagick's composite(1).
#
# Reads the 'png-base-composition' option via base_setConfigOption,
# which presumably fills PNG_BASE_COMPOSITIONS (bash locals are visible
# to callees via dynamic scoping) -- TODO confirm.  Each entry looks
# like FILE:GEOMETRY; relative FILEs resolve against RENDER_DIRECTORY.
function base_setComposition {
local PNG_BASE_COMPOSITION='' PNG_BASE_COMPOSITIONS='' ; base_setConfigOption 'png-base-composition'
# Nothing configured: leave the rendered file untouched.
[[ -z ${PNG_BASE_COMPOSITIONS} ]] && return
idforge_printMessage "${RENDER_FROM_FILE}" --as-composing-line
for PNG_BASE_COMPOSITION in ${PNG_BASE_COMPOSITIONS};do
# FILE part: everything before the first colon.
local PNG_BASE_COMPOSITION_FILE=$(echo ${PNG_BASE_COMPOSITION} | cut -d: -f1)
[[ ! ${PNG_BASE_COMPOSITION_FILE} =~ ^/ ]] && PNG_BASE_COMPOSITION_FILE=${RENDER_DIRECTORY}/${PNG_BASE_COMPOSITION_FILE}
idforge_checkFiles -efi 'image' ${PNG_BASE_COMPOSITION_FILE}
# GEOMETRY part: the second colon-separated field, handed straight to
# composite's -geometry option.
local PNG_BASE_COMPOSITION_GEOMETRY=$(echo ${PNG_BASE_COMPOSITION} | cut -d: -f2)
/usr/bin/composite -geometry ${PNG_BASE_COMPOSITION_GEOMETRY} ${PNG_BASE_COMPOSITION_FILE} ${RENDER_FROM_FILE} ${RENDER_FROM_FILE}
[[ ${?} -ne 0 ]] && idforge_printMessage "`gettext "The png-base-composition failed."`" --as-error-line
done
}
|
areguera/idforge
|
Library/Modules/Render/Modules/Png/Modules/Base/base_setComposition.sh
|
Shell
|
gpl-2.0
| 2,041 |
#!/bin/sh
#
# Run this to generate all the initial makefiles.
#
# $Id: autogen.sh 38447 2011-08-10 15:28:29Z morriss $
DIE=true
PROJECT="Wireshark"
# If you are going to use a non-default name for automake because your OS
# installation has multiple versions, you need to call both aclocal and automake
# with that version number, as they come from the same package.
#AM_VERSION='-1.8'
ACLOCAL=aclocal$AM_VERSION
AUTOHEADER=autoheader
AUTOMAKE=automake$AM_VERSION
AUTOCONF=autoconf
# Check for python. Python did not support --version before version 2.5.
# Until we require a version > 2.5, we should use -V.
PYVER=`python -V 2>&1 | sed 's/Python *//'`
case "$PYVER" in
2*|3*)
;;
*)
cat >&2 <<_EOF_
You must have Python in order to compile $PROJECT.
Download the appropriate package for your distribution/OS,
or get the source tarball at http://www.python.org/
_EOF_
DIE="exit 1"
esac
ACVER=`$AUTOCONF --version | grep '^autoconf' | sed 's/.*) *//'`
case "$ACVER" in
'' | 0.* | 1.* | 2.[0-5]* )
cat >&2 <<_EOF_
You must have autoconf 2.60 or later installed to compile $PROJECT.
Download the appropriate package for your distribution/OS,
or get the source tarball at ftp://ftp.gnu.org/pub/gnu/autoconf/
_EOF_
DIE="exit 1"
;;
esac
AMVER=`$AUTOMAKE --version | grep '^automake' | sed 's/.*) *//'`
case "$AMVER" in
1.9* | 1.[1][0-9]*)
;;
*)
cat >&2 <<_EOF_
You must have automake 1.9 or later installed to compile $PROJECT.
Download the appropriate package for your distribution/OS,
or get the source tarball at ftp://ftp.gnu.org/pub/gnu/automake/
_EOF_
DIE="exit 1"
;;
esac
#
# Apple's Developer Tools have a "libtool" that has nothing to do with
# the GNU libtool; they call the latter "glibtool". They also call
# libtoolize "glibtoolize".
#
# Check for "glibtool" first.
#
LTVER=`glibtool --version 2>/dev/null | grep ' libtool)' | \
sed 's/.*libtool) \([0-9][0-9.]*\)[^ ]* .*/\1/'`
if test -z "$LTVER"
then
LTVER=`libtool --version | grep ' libtool)' | \
sed 's/.*) \([0-9][0-9.]*\)[^ ]* .*/\1/' `
LIBTOOLIZE=libtoolize
else
LIBTOOLIZE=glibtoolize
fi
case "$LTVER" in
'' | 0.* | 1.[0-3]* )
cat >&2 <<_EOF_
You must have libtool 1.4 or later installed to compile $PROJECT.
Download the appropriate package for your distribution/OS,
or get the source tarball at ftp://ftp.gnu.org/pub/gnu/libtool/
_EOF_
DIE="exit 1"
;;
esac
$DIE
aclocal_flags=`./aclocal-flags`
aclocalinclude="$ACLOCAL_FLAGS $aclocal_flags";
echo $ACLOCAL $aclocalinclude
$ACLOCAL $aclocalinclude || exit 1
#
# We do NOT want libtoolize overwriting our versions of config.guess and
# config.sub, so move them away and then move them back.
# We don't omit "--force", as we want libtoolize to install other files
# without whining.
#
mv config.guess config.guess.save-libtool
mv config.sub config.sub.save-libtool
LTARGS=" --copy --force"
echo $LIBTOOLIZE $LTARGS
$LIBTOOLIZE $LTARGS || exit 1
rm -f config.guess config.sub
mv config.guess.save-libtool config.guess
mv config.sub.save-libtool config.sub
echo $AUTOHEADER
$AUTOHEADER || exit 1
echo $AUTOMAKE --add-missing --gnu $am_opt
$AUTOMAKE --add-missing --gnu $am_opt || exit 1
echo $AUTOCONF
$AUTOCONF || exit 1
#./configure "$@" || exit 1
echo
echo "Now type \"./configure [options]\" and \"make\" to compile $PROJECT."
|
drower/wireshark-1.10.0
|
autogen.sh
|
Shell
|
gpl-2.0
| 3,317 |
# Generate the RSA key pair used by CI: a 2048-bit private key and the
# matching public key (stored, per the existing layout, as cert.pem).
# Abort on the first failed step instead of producing a half-written
# pair, and make sure the output directory exists beforehand.
set -e
mkdir -p secret
openssl genrsa -out secret/key.pem 2048
openssl rsa -in secret/key.pem -pubout -out secret/cert.pem
|
plusplus7/KyrinBox
|
src/ci/make_secret.sh
|
Shell
|
gpl-2.0
| 100 |
#!/usr/bin/env bash
# -----------------------
# SEXY CRAZY WORMSY CUBES
# -----------------------
# Copyright(c)2015 Jonas Sjöberg
# https://github.com/jonasjberg
# [email protected]
#_______________________________________________________________________________
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#_______________________________________________________________________________
set -e # exit on first error
#set -x # debug mode
# Quote "$0" so a script path containing spaces still resolves.
PROGRAM_NAME=$(basename "$0")
DEFAULT_ERROR_MSG="Unknown (unspecified) error!"
USAGE_TEXT="Example usage: \"./${PROGRAM_NAME} WormsyCubes_Whatever.jar\""
# Print an error banner and terminate the script.
#   $1 - exit status (defaults to 1; the previous default of 0 made a
#        bare 'die' report success despite printing "Failed!")
#   $2 - message (defaults to $DEFAULT_ERROR_MSG)
function die()
{
    local msg=${2:-$DEFAULT_ERROR_MSG}
    echo "Failed!"
    echo "${PROGRAM_NAME} message: ${msg} .."
    exit "${1:-1}"
}
# Require at least one argument (the jar to run)...
[ $# -gt 0 ] || die 1 "${USAGE_TEXT}"
# ...and a java interpreter on the PATH, then hand the jar to it.
if command -v java >/dev/null; then
    java -jar "$1"
else
    die 1 "unable to locate java"
fi
|
jonasjberg/WormsyCubes
|
jar/wormsy.sh
|
Shell
|
gpl-2.0
| 1,811 |
#!/bin/bash
######################################################################
#
# idFORGE Framework - Manage identity manuals in community
# Copyright © 2015 The CentOS Artwork SIG
#
# idFORGE Framework is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# idFORGE Framework is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with idFORGE Framework; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Alain Reguera Delgado <[email protected]>
# 39 Street No. 4426 Cienfuegos, Cuba.
#
######################################################################
# Process every file listed in RENDER_FROM (the "extended" XML render
# flow): for each source file, create a temporal working copy and run
# the instance / localization / expansion passes over it.
#
# NOTE(review): RENDER_FROM_PO_FILE is not referenced in this body, but
# bash locals are dynamically scoped, so the xml_set* helpers called
# below may read it -- confirm before removing.
function extended {
local OPTION=0
local RENDER_FROM_PO_FILE=${RENDER_FROM_PO[0]}
while [[ ${OPTION} -lt ${#RENDER_FROM[*]} ]];do
local RENDER_FROM_FILE=${RENDER_FROM[${OPTION}]}
idforge_printMessage "${RENDER_FROM_FILE}" --as-processing-line
# Work on a temporal copy of the source file, never the original.
RENDER_FROM_INSTANCES[${OPTION}]=$(idforge_printTemporalFile "${RENDER_FROM[${OPTION}]}")
xml_setInstance
xml_setInstanceLocalized
xml_setInstanceExpanded
OPTION=$(( ++OPTION ))
done
}
|
areguera/idforge
|
Library/Modules/Render/Modules/Xml/Modules/Extended/extended.sh
|
Shell
|
gpl-2.0
| 1,592 |
#!/bin/sh
verbose=false
if [ "x$1" = "x-v" ]; then
verbose=true
out=/dev/stdout
err=/dev/stderr
else
out=/dev/null
err=/dev/null
fi
## make & makeopts
if gmake --version > /dev/null 2>&1; then
make=gmake;
else
make=make;
fi
makeopts="--quiet --no-print-directory -j"
# Ask the Makefile for the value of variable $1 via its "print-<name>"
# target; the outer echo collapses the output onto a single line.
make_print() {
  echo $($make $makeopts print-"$1")
}
## command tools
awk='awk'
bc='bc'
date='date'
grep='grep'
rm='rm -f'
sed='sed'
## symbol table
sym_table='obj/kernel.sym'
## gdb & gdbopts
gdb="$(make_print GDB)"
gdbport='1234'
gdb_in="$(make_print GRADE_GDB_IN)"
## qemu & qemuopts
qemu="$(make_print qemu)"
qemu_out="$(make_print GRADE_QEMU_OUT)"
if $qemu -nographic -help | grep -q '^-gdb'; then
qemugdb="-gdb tcp::$gdbport"
else
qemugdb="-s -p $gdbport"
fi
## default variables
default_timeout=30
default_pts=5
pts=5
part=0
part_pos=0
total=0
total_pos=0
## default functions
# Fold the just-finished part's score into the running totals and reset
# the per-part counters for the next section.
update_score() {
  # $(( )) replaces the original 'expr' calls: same integer arithmetic
  # without forking an external process per addition.
  total=$(( total + part ))
  total_pos=$(( total_pos + part_pos ))
  part=0
  part_pos=0
}
get_time() {
echo `$date +%s.%N 2> /dev/null`
}
show_part() {
echo "Part $1 Score: $part/$part_pos"
echo
update_score
}
show_final() {
update_score
echo "Total Score: $total/$total_pos"
if [ $total -lt $total_pos ]; then
exit 1
fi
}
show_time() {
t1=$(get_time)
time=`echo "scale=1; ($t1-$t0)/1" | $sed 's/.N/.0/g' | $bc 2> /dev/null`
echo "(${time}s)"
}
show_build_tag() {
echo "$1:" | $awk '{printf "%-24s ", $0}'
}
show_check_tag() {
echo "$1:" | $awk '{printf " -%-40s ", $0}'
}
# Print a status word ($1) on its own line; any remaining arguments are
# echoed beneath it, indented by awk, followed by a blank line.
show_msg() {
  msg_status=$1
  shift
  echo $msg_status
  if [ $# -gt 0 ]; then
    echo "$@" | awk '{printf " %s\n", $0}'
    echo
  fi
}
# Record a passing check: print OK (plus any detail lines), then credit
# $pts to both the earned and the possible part scores.
pass() {
  show_msg OK "$@"
  # Arithmetic expansion instead of forking 'expr' per increment.
  part=$(( part + pts ))
  part_pos=$(( part_pos + pts ))
}
# Record a failing check: print WRONG (plus any detail lines); only the
# possible score grows -- no points are earned.
fail() {
  show_msg WRONG "$@"
  # Arithmetic expansion instead of forking 'expr' per increment.
  part_pos=$(( part_pos + pts ))
}
run_qemu() {
# Run qemu with serial output redirected to $qemu_out. If $brkfun is non-empty,
# wait until $brkfun is reached or $timeout expires, then kill QEMU
qemuextra=
if [ "$brkfun" ]; then
qemuextra="-S $qemugdb"
fi
if [ -z "$timeout" ] || [ $timeout -le 0 ]; then
timeout=$default_timeout;
fi
t0=$(get_time)
(
ulimit -t $timeout
exec $qemu -nographic $qemuopts -serial file:$qemu_out -monitor null -no-reboot $qemuextra
) > $out 2> $err &
pid=$!
# wait for QEMU to start
sleep 1
if [ -n "$brkfun" ]; then
# find the address of the kernel $brkfun function
brkaddr=`$grep " $brkfun\$" $sym_table | $sed -e's/ .*$//g'`
(
echo "target remote localhost:$gdbport"
echo "set architecture i386:x86-64:intel"
echo "break *0x$brkaddr"
echo "continue"
) > $gdb_in
$gdb -batch -nx -x $gdb_in > /dev/null 2>&1
# make sure that QEMU is dead
# on OS X, exiting gdb doesn't always exit qemu
kill $pid > /dev/null 2>&1
fi
}
build_run() {
# usage: build_run <tag> <args>
show_build_tag "$1"
shift
if $verbose; then
echo "$make $@ ..."
fi
$make $makeopts $@ 'DEFS+=-DDEBUG_GRADE' > $out 2> $err
if [ $? -ne 0 ]; then
echo $make $@ failed
exit 1
fi
# now run qemu and save the output
run_qemu
show_time
}
check_result() {
# usage: check_result <tag> <check> <check args...>
show_check_tag "$1"
shift
# give qemu some time to run (for asynchronous mode)
if [ ! -s $qemu_out ]; then
sleep 4
fi
if [ ! -s $qemu_out ]; then
fail > /dev/null
echo 'no $qemu_out'
else
check=$1
shift
$check "$@"
fi
}
# Verify $qemu_out against a list of expectations.  The arguments form
# a tiny language read left to right:
#   !        negate the next expectation (the line must NOT appear)
#   -        match the next expectation as an extended regex anchored
#            as a whole line (grep -E "^...$") instead of a fixed string
#   <other>  the expectation itself
# Calls pass if everything matched, otherwise fail with every mismatch
# collected; in verbose mode a mismatch aborts the whole grading run.
check_regexps() {
okay=yes
not=0
reg=0
error=
for i do
if [ "x$i" = "x!" ]; then
not=1
elif [ "x$i" = "x-" ]; then
reg=1
else
# Look the expectation up in the QEMU output; only the grep exit
# status matters here.
if [ $reg -ne 0 ]; then
$grep '-E' "^$i\$" $qemu_out > /dev/null
else
$grep '-F' "$i" $qemu_out > /dev/null
fi
found=$(($? == 0))
# A mismatch is "found while negated" or "missing while expected".
if [ $found -eq $not ]; then
if [ $found -eq 0 ]; then
msg="!! error: missing '$i'"
else
msg="!! error: got unexpected line '$i'"
fi
okay=no
if [ -z "$error" ]; then
error="$msg"
else
error="$error\n$msg"
fi
fi
# The ! and - markers apply only to the expectation right after them.
not=0
reg=0
fi
done
if [ "$okay" = "yes" ]; then
pass
else
fail "$error"
if $verbose; then
exit 1
fi
fi
}
run_test() {
# usage: run_test [-tag <tag>] [-prog <prog>] [-Ddef...] [-check <check>] checkargs ...
tag=
prog=
check=check_regexps
while true; do
select=
case $1 in
-tag|-prog)
select=`expr substr $1 2 ${#1}`
eval $select='$2'
;;
esac
if [ -z "$select" ]; then
break
fi
shift
shift
done
defs=
while expr "x$1" : "x-D.*" > /dev/null; do
defs="DEFS+='$1' $defs"
shift
done
if [ "x$1" = "x-check" ]; then
check=$2
shift
shift
fi
if [ -z "$prog" ]; then
$make $makeopts touch > /dev/null 2>&1
args="$defs"
else
if [ -z "$tag" ]; then
tag="$prog"
fi
args="build-$prog $defs"
fi
build_run "$tag" "$args"
check_result 'check result' "$check" "$@"
}
quick_run() {
# usage: quick_run <tag> [-Ddef...]
tag="$1"
shift
defs=
while expr "x$1" : "x-D.*" > /dev/null; do
defs="DEFS+='$1' $defs"
shift
done
$make $makeopts touch > /dev/null 2>&1
build_run "$tag" "$defs"
}
quick_check() {
# usage: quick_check <tag> checkargs ...
tag="$1"
shift
check_result "$tag" check_regexps "$@"
}
## kernel image
osimg=$(make_print ucoreimg)
## swap image
swapimg=$(make_print swapimg)
## sfs image
sfsimg=$(make_print sfsimg)
## set default qemu-options
qemuopts="-m 256m -hda $osimg -drive file=$swapimg,media=disk,cache=writeback -drive file=$sfsimg,media=disk,cache=writeback"
## set break-function, default is readline
brkfun=readline
default_check() {
pts=10
check_regexps "$@"
pts=10
quick_check 'check output' \
'check_alloc_page() succeeded!' \
'check_boot_pgdir() succeeded!' \
'check_slab() succeeded!' \
'check_vma_struct() succeeded!' \
'check_pgfault() succeeded!' \
'check_vmm() succeeded.' \
'check_swap() succeeded.' \
'check_mm_swap: step1, mm_map ok.' \
'check_mm_swap: step2, mm_unmap ok.' \
'check_mm_swap: step3, exit_mmap ok.' \
'check_mm_swap: step4, dup_mmap ok.' \
'check_mm_swap() succeeded.' \
'check_mm_shm_swap: step1, share memory ok.' \
'check_mm_shm_swap: step2, dup_mmap ok.' \
'check_mm_shm_swap() succeeded.' \
'vfs: mount disk0.' \
'++ setup timer interrupts'
}
## check now!!
run_test -prog 'hello2' -check default_check \
'kernel_execve: pid = 3, name = "hello2".' \
'Hello world!!.' \
'I am process 3.' \
'hello2 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
run_test -prog 'fwrite_test' -check default_check \
'kernel_execve: pid = 3, name = "fwrite_test".' \
'Hello world!!.' \
'I am process 3.' \
'dup fd ok.' \
'fork fd ok.' \
'fwrite_test pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
run_test -prog 'fread_test2' -check default_check \
'kernel_execve: pid = 3, name = "fread_test2".' \
'fread_test2 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
show_part A
pts=30
timeout=300
run_test -prog 'sfs_filetest1' \
'kernel_execve: pid = 3, name = "sfs_filetest1".' \
'init_data ok.' \
'random_test ok.' \
'sfs_filetest1 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
run_test -prog 'sfs_filetest2' \
'kernel_execve: pid = 3, name = "sfs_filetest2".' \
'sfs_filetest2 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
run_test -prog 'sfs_dirtest1' \
'kernel_execve: pid = 3, name = "sfs_dirtest1".' \
'0: current: disk0:/' \
'1: current: disk0:/' \
'2: current: disk0:/home/' \
- '2: d 2 .... 512 .' \
- '2: d 6 .... 1536 ..' \
'3: current: disk0:/testman/' \
- '3: d 3 .... 2560 .' \
- '3: d 6 .... 1536 ..' \
'3: - 1 21 83153 awk' \
'3: d 2 5 1792 coreutils' \
'3: - 1 8 31690 cpp' \
'3: - 1 100 408495 gcc' \
'3: - 1 3 8341 gdb' \
'3: - 1 12 46254 ld' \
'3: - 1 3 10371 sed' \
'3: - 1 5 17354 zsh' \
'4: current: disk0:/testman/coreutils/' \
- '4: d 2 .... 1792 .' \
- '4: d 3 .... 2560 ..' \
'4: - 1 1 2115 cat' \
'4: - 1 2 5338 cp' \
'4: - 1 2 7487 ls' \
'4: - 1 1 3024 mv' \
'4: - 1 1 3676 rm' \
'5: current: disk0:/testman/' \
- '5: d 3 .... 2560 .' \
- '5: d 6 .... 1536 ..' \
'6: current: disk0:/' \
- '6: d 6 .... 1536 .' \
- '6: d 6 .... 1536 ..' \
- '6: d 2 .... ...... bin' \
'6: d 2 0 512 home' \
'6: d 2 1 768 test' \
'6: d 3 8 2560 testman' \
'sfs_dirtest1 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
show_part B
run_test -prog 'sfs_filetest3' \
'kernel_execve: pid = 3, name = "sfs_filetest3".' \
'0: - 1 0 0 testfile' \
'1: - 2 1 14 testfile' \
'1: - 2 1 14 orz' \
'link test ok.' \
'2: - 1 1 14 testfile' \
'unlink test ok.' \
'3: - 1 0 0 testfile' \
! - '2: ....................... orz' \
'sfs_filetest3 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
run_test -prog 'sfs_dirtest2' \
'kernel_execve: pid = 3, name = "sfs_dirtest2".' \
'0: current: disk0:/test/' \
- '0: d 2 .... 768 .' \
- '0: d 6 .... 1536 ..' \
'0: - 1 0 0 testfile' \
'1: current: disk0:/test/' \
- '1: d 3 .... 1280 .' \
- '1: d 3 .... 768 dir0' \
'1: - 1 0 0 file1' \
'2: current: disk0:/test/' \
- '2: d 3 .... 1280 .' \
- '2: d 3 .... 768 dir0' \
'2: - 2 0 0 file1' \
'3: current: disk0:/test/dir0/dir1/' \
- '3: d 2 .... 768 .' \
- '3: d 3 .... 768 ..' \
'3: - 2 0 0 file2' \
'4: current: disk0:/test/dir0/' \
- '4: d 2 .... 512 .' \
- '4: d 3 .... 1280 ..' \
'5: current: disk0:/test/' \
- '5: d 2 .... 768 .' \
- '5: d 6 .... 1536 ..' \
'sfs_dirtest2 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
run_test -prog 'sfs_dirtest3' \
'kernel_execve: pid = 3, name = "sfs_dirtest3".' \
'0: current: disk0:/test/' \
- '0: d 2 .... 768 .' \
- '0: d 6 .... 1536 ..' \
'0: - 1 0 0 testfile' \
'1: current: disk0:/test/dir0/dir1/' \
- '1: d 2 .... 512 .' \
- '1: d 3 .... 1024 ..' \
'2: current: disk0:/test/dir0/dir1/' \
- '2: d 2 .... 768 .' \
- '2: d 3 .... 768 ..' \
'2: - 1 1 28 file2' \
'3: current: disk0:/test/' \
- '3: d 4 .... 1280 .' \
- '3: d 6 .... 1536 ..' \
- '3: d 2 .... 768 dir2' \
- '3: d 2 .... 512 dir0' \
'4: current: disk0:/test/' \
- '4: d 2 .... 768 .' \
- '4: d 6 .... 1536 ..' \
'sfs_dirtest3 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
show_part C
run_test -prog 'sfs_exectest1' \
'kernel_execve: pid = 3, name = "sfs_exectest1".' \
'00: Hello world!!.' \
'01: Hello world!!.' \
'03: Hello world!!.' \
'05: Hello world!!.' \
'07: Hello world!!.' \
'09: Hello world!!.' \
'11: Hello world!!.' \
'13: Hello world!!.' \
'15: Hello world!!.' \
'17: Hello world!!.' \
'19: Hello world!!.' \
'21: Hello world!!.' \
'23: Hello world!!.' \
'25: Hello world!!.' \
'27: Hello world!!.' \
'29: Hello world!!.' \
'31: Hello world!!.' \
'sfs_exectest1 pass.' \
'all user-mode processes have quit.' \
'init check memory pass.' \
! - 'user panic at .*'
show_part D
## print final-score
show_final
|
spinlock/ucore
|
x86_64/lab6_filesystem/proj18.2/tools/grade.sh
|
Shell
|
gpl-2.0
| 22,689 |
#!/bin/bash
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
# Vagrant provisioning for a NetBSD CI box: install git and two Python
# versions, clone scapy, and prepare a Python 2.7 virtualenv with tox.
# NOTE(review): 'sudo -s' with no command opens an interactive shell
# and blocks; the commands below only run after that shell exits, and
# not as root -- confirm this behaves as intended under the Vagrant
# provisioner.
sudo -s
unset PROMPT_COMMAND
export PATH="/sbin:/usr/pkg/sbin:/usr/pkg/bin:$PATH"
# Binary packages come from the 2018Q4 pkgsrc snapshot for NetBSD 8.0.
export PKG_PATH="http://ftp.netbsd.org/pub/pkgsrc/packages/NetBSD/amd64/8.0_2018Q4/All/"
pkg_delete curl
pkg_add git python27 python36 py27-virtualenv py36-expat
# TLS verification is disabled, presumably because the base image lacks
# current CA certificates -- TODO confirm.
git -c http.sslVerify=false clone https://github.com/secdev/scapy
cd scapy
virtualenv-2.7 venv
. venv/bin/activate
pip install tox
# Hand the checkout back to the unprivileged vagrant user.
chown -R vagrant:vagrant ../scapy/
|
mtury/scapy
|
doc/vagrant_ci/provision_netbsd.sh
|
Shell
|
gpl-2.0
| 617 |
#!/bin/bash
# Build PDFs for every book-scanner project that carries a 'genpdf' marker:
# for each first-level directory under the scan root containing 'genpdf',
# run tif2pdf.sh inside its 'booktif' subdirectory, then remove the marker.

# Remember where this script (and tif2pdf.sh) lives.
cd "$(dirname "$0")" || exit 1
HERE=$(pwd)

# Abort if the scan root is missing instead of silently scanning the cwd.
cd /tr2/bookscanner/bookscanner || exit 1

# NUL-delimited find handles project names with whitespace; the explicit
# '.' start path keeps find POSIX-conformant.
find . -mindepth 1 -maxdepth 1 -type d -print0 |
while IFS= read -r -d '' project
do
	if [ -f "$project/genpdf" ]
	then
		echo "$(date '+%Y%m%d %H:%M:%S') Building PDF for $project"
		# Skip this project (instead of running in the wrong dir) if
		# booktif is absent.
		pushd "$project/booktif" || continue
		"$HERE/tif2pdf.sh"
		rm ../genpdf
		popd
		echo "$(date '+%Y%m%d %H:%M:%S') DONE"
	fi
done
|
vincib/bookscanner-viewer
|
sh/scanpdf.sh
|
Shell
|
gpl-3.0
| 349 |
#!/bin/bash
# macOS post-install step for Libresonic: open up permissions on the
# application-support directory, hand ownership to root:admin, and drop any
# exploded Jetty deployment left behind by a previous install.

readonly home_dir="/Library/Application Support/Libresonic"

# Application home: world read/write/execute, owned by root:admin.
chmod oug+rwx "$home_dir"
chown root:admin "$home_dir"

# Transcoder directory: read/execute for everyone, same ownership.
chmod oug+rx "$home_dir/transcode"
chown root:admin "$home_dir/transcode"

# Remove the stale Jetty working directory so the new WAR is re-exploded.
rm -rf "$home_dir/jetty"

echo Libresonic installation done
|
langera/libresonic
|
libresonic-installer-mac/src/postinstall.sh
|
Shell
|
gpl-3.0
| 298 |
#!/bin/sh
# Cross-compile configuration for EiskaltDC++ targeting Windows via MinGW32
# with Qt4. variables.sh supplies the *_DIR and *_COMPILER variables used
# below; the '\;' sequences keep the semicolons that separate the
# CMAKE_PREFIX_PATH entries from being interpreted by the shell.
. ./variables.sh
cmake -DCMAKE_SYSTEM_NAME=Windows -DCMAKE_PREFIX_PATH=$QT_MINGW32_DIR\;$OPENSSL_DIR\;$ASPELL_DIR\;$BOOST_DIR\;$GETTEXT_DIR\;$LIBBZ2_DIR\;$LIBICONV_DIR\;$LIBIDN_DIR\;$LIBZ_DIR\;$LUA_DIR\;$PCRE_DIR\;$MINGW32_DIR -DCMAKE_C_COMPILER=$C_COMPILER -DCMAKE_CXX_COMPILER=$CXX_COMPILER -DCMAKE_RC_COMPILER=$RC_COMPILER -DCMAKE_CXX_FLAGS="-I$BOOST_HEADERS_DIR -I$GETTEXT_HEADERS_DIR -I$PCRE_HEADERS_DIR" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$INSTALL_DIR -DSHARE_DIR=resources -DUSE_QT=ON -DUSE_QT5=OFF -DOPENSSL_MSVC=ON -DUSE_ASPELL=ON -DFORCE_XDG=OFF -DDBUS_NOTIFY=OFF -DUSE_JS=ON -DUSE_MINIUPNP=ON -DWITH_SOUNDS=ON -DPERL_REGEX=ON -DUSE_QT_QML=OFF -DLUA_SCRIPT=ON -DWITH_LUASCRIPTS=ON -DUSE_QT_SQLITE=ON -DNO_UI_DAEMON=ON -DJSONRPC_DAEMON=ON -DUSE_CLI_JSONRPC=ON $SOURCES_DIR
|
pavel-pimenov/eiskaltdcpp
|
windows/outdated/configure.sh
|
Shell
|
gpl-3.0
| 809 |
# Build a hybrid BIOS/UEFI bootable live ISO from the prepared binary tree.
# Abort instead of building from the wrong directory if the parent is gone.
cd .. || exit 1

# The binary-tree location may be overridden via ./config/binary; otherwise
# fall back to the default build output directory.
if [ -f ./config/binary ]
then
	export binary=$(cat ./config/binary)
else
	export binary=$(pwd)/iso_olusturucu/binary
fi

# Quote $binary everywhere — the path may contain spaces.
xorriso -as mkisofs \
    -iso-level 3 -rock -joliet \
    -max-iso9660-filenames -omit-period \
    -omit-version-number -relaxed-filenames -allow-lowercase \
    -volid "CustomLiveIso" \
    -eltorito-boot isolinux/isolinux.bin \
    -eltorito-catalog isolinux/isolinux.cat \
    -no-emul-boot -boot-load-size 4 -boot-info-table \
    -eltorito-alt-boot -e boot/grub/efi.img -isohybrid-gpt-basdat -no-emul-boot \
    -isohybrid-mbr "$binary/isolinux/isohybrid-mbr" \
    -output "live-image-amd64.hybrid.iso" "$binary"
|
parduscix/iso_duzenleyici
|
iso_olusturucu/binary.sh
|
Shell
|
gpl-3.0
| 678 |
#!/bin/sh
# Run this to generate all the initial makefiles, etc.
# Standard GNOME-style autogen: verify the required autotools are present,
# regenerate the build machinery in every subdirectory holding a
# configure.ac, refresh the translation templates, then (unless NOCONFIGURE
# is set) run ./configure.
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
# DIE flips to 1 as soon as any required tool is found missing.
DIE=0
# If GNOME2_DIR points at a GNOME prefix, prefer its m4 macros, libraries
# and tools.
if [ -n "$GNOME2_DIR" ]; then
ACLOCAL_FLAGS="-I $GNOME2_DIR/share/aclocal $ACLOCAL_FLAGS"
LD_LIBRARY_PATH="$GNOME2_DIR/lib:$LD_LIBRARY_PATH"
PATH="$GNOME2_DIR/bin:$PATH"
export PATH
export LD_LIBRARY_PATH
fi
# Sanity check: we must be run from (or pointed at) the package top level.
(test -f $srcdir/configure.ac) || {
echo -n "**Error**: Directory "\`$srcdir\'" does not look like the"
echo " top-level package directory"
exit 1
}
# --- Tool checks: each missing tool prints an error and sets DIE=1. ---
(autoconf --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`autoconf' installed."
echo "Download the appropriate package for your distribution,"
echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
# intltool is only required when configure.ac actually uses it.
(grep "^IT_PROG_INTLTOOL" $srcdir/configure.ac >/dev/null) && {
(intltoolize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`intltool' installed."
echo "You can get it from:"
echo " ftp://ftp.gnome.org/pub/GNOME/"
DIE=1
}
}
(grep "^AM_PROG_XML_I18N_TOOLS" $srcdir/configure.ac >/dev/null) && {
(xml-i18n-toolize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`xml-i18n-toolize' installed."
echo "You can get it from:"
echo " ftp://ftp.gnome.org/pub/GNOME/"
DIE=1
}
}
(grep "^AM_PROG_LIBTOOL" $srcdir/configure.ac >/dev/null) && {
(libtool --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`libtool' installed."
echo "You can get it from: ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
}
# glib-gettextize is needed only when AM_GLIB_GNU_GETTEXT is used and
# configure.ac does not handle POTFILES itself.
(grep "^AM_GLIB_GNU_GETTEXT" $srcdir/configure.ac >/dev/null) && {
(grep "sed.*POTFILES" $srcdir/configure.ac) > /dev/null || \
(glib-gettextize --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`glib' installed."
echo "You can get it from: ftp://ftp.gtk.org/pub/gtk"
DIE=1
}
}
(automake --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: You must have \`automake' installed."
echo "You can get it from: ftp://ftp.gnu.org/pub/gnu/"
DIE=1
NO_AUTOMAKE=yes
}
# if no automake, don't bother testing for aclocal
test -n "$NO_AUTOMAKE" || (aclocal --version) < /dev/null > /dev/null 2>&1 || {
echo
echo "**Error**: Missing \`aclocal'. The version of \`automake'"
echo "installed doesn't appear recent enough."
echo "You can get automake from ftp://ftp.gnu.org/pub/gnu/"
DIE=1
}
if test "$DIE" -eq 1; then
exit 1
fi
if test -z "$*"; then
echo "**Warning**: I am going to run \`configure' with no arguments."
echo "If you wish to pass any to it, please specify them on the"
echo \`$0\'" command line."
echo
fi
# The IBM xlc compiler requires automake's --include-deps mode.
case $CC in
xlc )
am_opt=--include-deps;;
esac
# Regenerate build files in every directory holding a configure.ac
# (CVS metadata directories are pruned from the search).
for coin in `find $srcdir -path $srcdir/CVS -prune -o -name configure.ac -print`
do
dr=`dirname $coin`
if test -f $dr/NO-AUTO-GEN; then
echo skipping $dr -- flagged as no auto-gen
else
echo processing $dr
( cd $dr
aclocalinclude="$ACLOCAL_FLAGS"
if grep "^AM_GLIB_GNU_GETTEXT" configure.ac >/dev/null; then
echo "Creating $dr/aclocal.m4 ..."
test -r $dr/aclocal.m4 || touch $dr/aclocal.m4
echo "Running glib-gettextize...  Ignore non-fatal messages."
echo "no" | glib-gettextize --force --copy
echo "Making $dr/aclocal.m4 writable ..."
test -r $dr/aclocal.m4 && chmod u+w $dr/aclocal.m4
fi
if grep "^IT_PROG_INTLTOOL" configure.ac >/dev/null; then
echo "Running intltoolize..."
intltoolize --copy --force --automake
fi
if grep "^AM_PROG_XML_I18N_TOOLS" configure.ac >/dev/null; then
echo "Running xml-i18n-toolize..."
xml-i18n-toolize --copy --force --automake
fi
if grep "^AM_PROG_LIBTOOL" configure.ac >/dev/null; then
if test -z "$NO_LIBTOOLIZE" ; then
echo "Running libtoolize..."
libtoolize --force --copy
fi
fi
echo "Running aclocal $aclocalinclude ..."
aclocal $aclocalinclude
if grep "^AM_CONFIG_HEADER" configure.ac >/dev/null; then
echo "Running autoheader..."
autoheader
fi
#echo "Running gnome-doc-prepare..."
#gnome-doc-prepare --force --copy
echo "Running automake --gnu $am_opt ..."
automake --add-missing --gnu $am_opt
echo "Running autoconf ..."
autoconf
)
fi
done
# generate the pot files
echo Running intltool-update ...
cd po
intltool-update --maintain >/dev/null
intltool-update --pot
cd - >/dev/null
conf_flags="--enable-maintainer-mode"
# Honour NOCONFIGURE: set it (to anything) to skip running ./configure.
if test x$NOCONFIGURE = x; then
echo Running $srcdir/configure $conf_flags "$@" ...
$srcdir/configure $conf_flags "$@" \
&& echo Now type \`make\' to compile. || exit 1
else
echo Skipping configure process.
fi
|
tapule/BMonkey
|
autogen.sh
|
Shell
|
gpl-3.0
| 4,736 |
#!/bin/bash
# Reinstall the GRUB boot loader on /dev/sda and regenerate its config.
# Distributions ship the tools either as grub2-* (Fedora/openSUSE) or
# grub-* (Debian/Ubuntu); probe for both and use whichever is present.
#
# The original detected the tools with `grub=$((ls /usr/sbin | grep ...))`,
# which is ambiguous between arithmetic expansion and command substitution,
# and its `|| exit 0` only exited the substitution subshell. Probe for the
# executables directly instead.

# True if the tool exists in /usr/sbin (which may not be on a non-root
# user's PATH) or anywhere on PATH.
has_grub_tool() {
    [ -x "/usr/sbin/$1" ] || command -v "$1" > /dev/null 2>&1
}

echo "Updating GRUB settings, please wait..."

if has_grub_tool grub2-install
then
    sudo grub2-install /dev/sda
    sudo grub2-mkconfig -o /boot/grub2/grub.cfg
elif has_grub_tool grub-install
then
    sudo grub-install /dev/sda
    sudo grub-mkconfig -o /boot/grub/grub.cfg
else
    echo "No tools for GRUB was found."
    exit 0
fi

echo "Finished."
|
NJUOPEN/Quick-Linux-Clone
|
user_config/update_grub.sh
|
Shell
|
gpl-3.0
| 496 |
#!/bin/bash
# Render the Django model graph for the listed apps into a PNG using the
# django-extensions 'graph_models' command; -g groups models by application.
python manage.py graph_models employees organizations projects funding_programs -g -o projects_morelab_models.png
|
OscarPDR/projects_morelab
|
projects_morelab/scripts/generate_model_graph.sh
|
Shell
|
gpl-3.0
| 127 |
#!/bin/bash
# Temporary upgrade script for the panel until panel becomes more self-sufficient
# Runs only when the panel is installed (lock file present). The echo_*/
# apt_install helpers are provided by the surrounding swizzin environment.
if [[ -f /install/.panel.lock ]]; then
    # The panel needs read access to user home dirs; grant it via ACLs once.
    if ! dpkg -s acl > /dev/null 2>&1; then
        echo_progress_start "Modifying ACLs for swizzin group to prevent panel issues"
        apt_install acl
        setfacl -m g:swizzin:rx /home/*
        echo_progress_done
    fi
    # Abort rather than run git (including the destructive reset below)
    # in whatever directory we happen to be in if the checkout is missing.
    cd /opt/swizzin/swizzin || exit 1
    #git reset HEAD --hard
    echo_progress_start "Pulling new commits"
    git pull 2> /dev/null || { PANELRESET=1; }
    # Pull failed (dirty tree): hard-reset to origin/master while keeping
    # any user files under core/custom.
    if [[ $PANELRESET == 1 ]]; then
        echo_warn "Working around unclean git repo"
        git fetch origin master
        cp -a core/custom core/custom.tmp
        git reset --hard origin/master
        mv core/custom.tmp/* core/custom/
        rm -r core/custom.tmp
    fi
    echo_progress_done "Commits pulled"
    echo_progress_start "Restarting Panel"
    systemctl restart panel
    echo_progress_done "Done!"
fi
|
liaralabs/swizzin
|
scripts/upgrade/panel.sh
|
Shell
|
gpl-3.0
| 872 |
#!/bin/bash
# Build and install Git 2.5.0 from source on a CentOS/RHEL system,
# replacing the distribution package with one under /usr/local/git.

# Build dependencies.
yum -y install curl-devel expat-devel gettext-devel openssl-devel zlib-devel
yum -y install gcc perl-ExtUtils-MakeMaker
yum -y remove git

cd /usr/src || exit 1
wget https://github.com/git/git/archive/v2.5.0.tar.gz
tar -zxvf v2.5.0.tar.gz
cd git-2.5.0 || exit 1
make prefix=/usr/local/git all
make prefix=/usr/local/git install

# Single quotes so $PATH is expanded at login time, not baked in with the
# installer's PATH (the original used double quotes and froze it).
echo 'export PATH=$PATH:/usr/local/git/bin' >> /etc/bashrc
source /etc/bashrc

# Clean up from /usr/src: the source tree and tarball live there, but the
# original ran these removals from inside git-2.5.0 where neither path
# exists, so nothing was ever deleted.
cd /usr/src
rm -rf git-2.5.0
rm -rf v2.5.0.tar.gz
|
vForce825/serverUtils
|
git.sh
|
Shell
|
gpl-3.0
| 436 |
#!/bin/sh
# update the base system
apk update && apk upgrade
# we need python and git, move along
apk add python git
# add a non-root user and group called "headphones" with no password (-D), no home dir (-H), no shell (-s /bin/false), and gid/uid set to 1000
addgroup -g 1000 headphones && adduser -H -D headphones -G headphones -s /bin/false -u 1000
# create the dirs and volume mount points
mkdir /config /data /downloads /music
# download the latest headphones version to /headphones; clone to an explicit
# absolute path so the chown of /headphones below matches regardless of the
# current working directory (the original cloned into ./headphones)
git clone https://github.com/rembo10/headphones.git /headphones
# add the first and currently only entry to our config file to accept requests from all IPs
printf "[General]\nhttp_host = 0.0.0.0" > /config/headphones.ini
# change the owner accordingly
chown -R headphones:headphones /headphones /config /data /downloads /music
# also, clear the apk cache:
rm -rf /var/cache/apk/*
|
pwntr/headphones-alpine-docker
|
setup/setup.sh
|
Shell
|
gpl-3.0
| 859 |
# Bash programmable completion for the 'spritzle' CLI.

# Scrape the list of subcommands from 'spritzle --help': the commands are
# listed after the 'Commands:' heading. Leaves the result in the global
# $cmds (read by _spritzle).
_get_commands() {
    local n
    n=$(spritzle --help | grep -n 'Commands:' | cut -d ':' -f 1)
    # Replace deprecated 'let' with POSIX arithmetic expansion.
    n=$((n + 1))
    cmds=$(spritzle --help | tail -n+"$n" | cut -d ' ' -f 3 | xargs)
}

_spritzle() {
    local cur=${COMP_WORDS[COMP_CWORD]}
    local cmd=${COMP_WORDS[COMP_CWORD-1]}
    # First word after 'spritzle': complete a subcommand name.
    if [[ $COMP_CWORD -eq 1 ]]; then
        _get_commands
        COMPREPLY=($(compgen -W "${cmds}" -- "${cur}"))
        return 0
    fi
    case "$cmd" in
        remove)
            # 'spritzle remove <hash>': complete active torrent info-hashes.
            local w
            w=$(spritzle list --no-header -f info_hash | xargs)
            COMPREPLY=($(compgen -W "${w}" -- "${cur}"))
            return 0
            ;;
    esac
}

complete -F _spritzle spritzle
|
spritzle/spritzle
|
scripts/complete.sh
|
Shell
|
gpl-3.0
| 546 |
#!/bin/sh
# $XTermId: fonts.sh,v 1.10 2003/05/19 00:52:30 tom Exp $
# -----------------------------------------------------------------------------
# this file is part of xterm
#
# Copyright 1999-2002,2003 by Thomas E. Dickey
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE ABOVE LISTED COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name(s) of the above copyright
# holders shall not be used in advertising or otherwise to promote the
# sale, use or other dealings in this Software without prior written
# authorization.
# -----------------------------------------------------------------------------
# Demonstrate the use of dynamic colors by setting the background successively
# to different values.
# NOTE(review): the sentence above looks inherited from a sibling demo; the
# code below actually cycles xterm's font slots via the OSC 50 escape.
# ESC presumably holds a literal escape (0x1b) byte — it is not printable here.
ESC=""
CMD='echo'
OPT='-n'
SUF=''
TMP=/tmp/xterm$$
# Work out how to emit a string with no trailing newline: try 'echo -n';
# if that leaves output (or fails), fall back to printf/print with '\c'.
eval '$CMD $OPT >$TMP || echo fail >$TMP' 2>/dev/null
( test ! -f $TMP || test -s $TMP ) &&
for verb in printf print ; do
rm -f $TMP
eval '$verb "\c" >$TMP || echo fail >$TMP' 2>/dev/null
if test -f $TMP ; then
if test ! -s $TMP ; then
CMD="$verb"
OPT=
SUF='\c'
break
fi
fi
done
rm -f $TMP
# Query the terminal for the current font selection (OSC 50 ; ?) with the
# tty in raw mode so the reply can be read back, then restore tty settings.
exec </dev/tty
old=`stty -g`
stty raw -echo min 0 time 5
$CMD $OPT "${ESC}]50;?${SUF}" > /dev/tty
read original
stty $old
original="${original}${SUF}"
# Arrange to restore the original font on exit; prefer symbolic signal
# names when the shell supports them, numeric fallbacks otherwise.
if ( trap "echo exit" EXIT 2>/dev/null ) >/dev/null
then
trap '$CMD $OPT "$original" >/dev/tty; exit' EXIT HUP INT TRAP TERM
else
trap '$CMD $OPT "$original" >/dev/tty; exit' 0 1 2 5 15
fi
# Sweep the font index F back and forth between 1 and T forever,
# switching direction D at each end of the range.
F=1
D=1
T=6
while true
do
$CMD $OPT "${ESC}]50;#$F${SUF}" >/dev/tty
#sleep 1
if test .$D = .1 ; then
test $F = $T && D=-1
else
test $F = 1 && D=1
fi
F=`expr $F + $D`
done
|
chriskmanx/qmole
|
QMOLEDEV/xterm-270/vttests/fonts.sh
|
Shell
|
gpl-3.0
| 2,700 |
#!/bin/sh
# Launcher for the BSB gateway: run its Python 3 configuration entry point.
/usr/bin/python3 config.py
|
loehnertj/bsbgateway
|
bsbgateway.sh
|
Shell
|
gpl-3.0
| 37 |
#!/bin/bash
# Launch a demo Nova instance on the controller node: generate an SSH key,
# register it with OpenStack, pick the cirros image and demo network, boot
# the instance and open ICMP + SSH in the default security group.
# User-facing messages are intentionally left in Spanish.
# Verify the script is being run as root
if [ "$(id -u)" != "0" ]; then
echo "Debe ser root para ejecutar los script." 1>&2
exit 1
fi
# This script must only run on the controller node.
if [ "$(hostname)" != "controller" ]; then
echo "
##################################################################################################
Este script solo se debe ejecutar en el nodo controller
##################################################################################################"
exit 1
fi
# Marker file: refuse to run twice.
if [ -f ./.launch-instance ];then
echo "
##################################################################################################
Usted ya ejecuto este script debe continuar con le recomendamos que lo haga manual o desde Horizon
##################################################################################################
"
exit 0
fi
# The nova-network step must have completed first.
if [ ! -f ./.nova-network ];then
echo "
##################################################################################################
No puede hacer este paso debe ejecutar primero
'. ./openstack-.nova-network.sh'
##################################################################################################
"
exit 1
fi
if [ ! -f ./password-table.sh ] || [ ! -f ./admin-openrc.sh ];then
echo "
##################################################################################################
No puede hacer este paso debe asegurar que tenga los archvios
./password-table.sh y ./admin-openrc.sh
##################################################################################################
"
exit 1
fi
source ./password-table.sh
# Generate an SSH key pair
ssh-keygen
# Add the public key to the OpenStack environment
echo -e '\n\e[33;1m Pulse Enter para continuar...!!! \e[m'
nova keypair-add --pub-key ~/.ssh/id_rsa.pub demo-key
# Verify that the public key was added
echo -e '\n\e[33;1m Debe ver que se agregara una llave publica, llamada demo-key \e[m'
nova keypair-list
# List the available flavors
echo -e '\n\e[33;1m Flavors disponibles \e[m'
nova flavor-list
# List the available images (and capture the cirros image name)
echo -e '\n\e[33;1m Images disponibles \e[m'
nova image-list
NAME_IMAGE=$(nova image-list | awk '/cirros/ {print $4}')
# List the available networks (and capture the demo network id)
echo -e '\n\e[33;1m Redes disponibles \e[m'
nova net-list
DEMO_NET_ID=$(nova net-list | awk '/demo/ {print $2}')
# List the available security groups
echo -e '\n\e[33;1m Grupos de seguridad disponibles \e[m'
nova secgroup-list
# Launch the instance; this takes a while...!!!
nova boot --flavor m1.tiny --image $NAME_IMAGE --nic net-id=$DEMO_NET_ID \
--security-group default --key-name demo-key demo-instance1
sleep 7
# Check the status of the instances
echo -e '\n\e[33;1m Chequear el estatus de las instancias \e[m'
# Access the instance through a virtual (VNC) console
echo -e '\n\e[33;1m Para acceder a la instancia usamos un consola virtual, capture la URL \e[m'
nova get-vnc-console demo-instance1 novnc
nova list
# Allow ICMP so the instance is reachable remotely (ping)
echo -e '\n\e[33;1m Para acceder a la instancia de forma remota \e[m'
nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
# Allow ssh connections
echo -e '\n\e[33;1m Permitir la conexiones ssh \e[m'
nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
# Drop the marker so re-runs are refused.
touch ./.launch-instance
echo "
##################################################################################################
La URL que capturo, si no la capturo ejecute 'nova get-vnc-console demo-instance1 novnc'
puede colocarla en un navegador que tenga acceso a controller para que haga la
conexion con la instancia.
Luego desde el Host abre un navegador y coloca la URL que capturo.
tambien puede establecer conexion ssh con la instancia
ejecute esto en el nodo controller 'nova list'
obtenga la IP de la instancia
ejecute esto en el nodo compute1 'ssh [email protected]'
Puede ahora continuar con '. ./openstack-horizon.sh'
##################################################################################################
"
|
cgomeznt/openstack
|
scripts/openstack-launch-instance.sh
|
Shell
|
gpl-3.0
| 4,022 |
#! /bin/sh
# Compile the simplest_ffmpeg_player demo with gcc (debug symbols via -g),
# linking SDL2 and the FFmpeg libraries installed under /usr/local.
gcc simplest_ffmpeg_player.cpp -g -o simplest_ffmpeg_player.out \
-I /usr/local/include -L /usr/local/lib -lSDL2main -lSDL2 -lavformat -lavcodec -lavutil -lswscale
|
YuxuanLing/trunk
|
trunk/code/study/ffmpeg/simplest_ffmpeg_player/simplest_ffmpeg_player/compile_gcc.sh
|
Shell
|
gpl-3.0
| 175 |
#!/bin/bash
# Factory provisioning / self-test for a VerySeriousButton device:
# enter DFU mode, serialise ($1 is presumably the serial number — confirm
# against serialize.py), flash the firmware, program a test key sequence,
# verify the string typed by the button, then restore a clean configuration.
VSBUTIL=../../../utility/vsbutil/vsbutil.py
# Put the device into its DFU bootloader.
$VSBUTIL dfu > /dev/null
# Chain: serialise, flash via 'make dfu', wipe config, program keys that
# should type "Hello there!", then wait for the operator to press the
# button so its keystrokes land on the 'read' below.
./serialize.py $1 && \
sleep 1 && \
make dfu && \
sleep 1 && \
$VSBUTIL wipeconfig && \
echo && echo && echo && \
$VSBUTIL setkeys shift+h e l l o space t h e r e shift+1 enter && \
echo -n "Button test> " && \
read vsbsays && \
if [ "$vsbsays" = "Hello there!" ]
then
# Keystroke test passed: blank the EEPROM, wipe and reload the shipping
# configuration, then report the device serial number.
echo "Success! Re-wiping config."
$VSBUTIL eepwrite 0 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff && \
$VSBUTIL wipeconfig && \
$VSBUTIL loadconfig && \
$VSBUTIL getconfig && \
echo && echo SERIAL: `$VSBUTIL getserial`
else
echo "Failure?"
fi
|
gregcourville/VerySeriousButton-firmware
|
VerySeriousButton/init_vsb.sh
|
Shell
|
gpl-3.0
| 658 |
#!/usr/bin/env bash
# Collect solver outputs from two result folders and hand the two merged
# tables to the decompose_stats.r analysis script.

# collect <folder> <dest>: concatenate every file in <folder> whose name
# does not contain 'error', keeping the header (line 1) plus every
# even-numbered line, into <dest>.
# NOTE(review): xargs -P 8 concatenates batches in parallel, so row order
# across files is not deterministic — behaviour preserved from the original.
collect() {
    local src=$1 dest=$2
    cd "$src" || exit 1
    # The original passed a stray "$folder/" directory argument to cat,
    # which only produced a (suppressed) error; dropped here.
    ls "$src" | grep -v error | xargs -n 32 -P 8 cat 2> /dev/null \
        | awk 'NR==1 || NR%2==0' > "$dest"
    cd - || exit 1
}

tmpfile=$(mktemp)
tmpfile2=$(mktemp)

collect "$1" "$tmpfile"
collect "$2" "$tmpfile2"

./decompose_stats.r "$tmpfile" "$tmpfile2"
ret=$?

rm "$tmpfile" "$tmpfile2"
exit $ret
|
daajoe/asp_param_exp
|
analyze/decompose_stats.sh
|
Shell
|
gpl-3.0
| 458 |
# This file is part of the Aloofix project.
# Copyright (C) 2013 Kevin Johnson <[email protected]>
# Distributed under the terms of the GNU General Public License version 3,
# or (at your option) any later version.
# Package build specification for libffi, sourced by the Aloofix package
# build system, which consumes the variables and the configure() override.
name=libffi
version=3.0.13
sequence=1
site=ftp://sourceware.org/pub/$name
description="A portable Foreign Function Interface library"
depends=
section=libraries
license=MIT
license_files=LICENSE
extra_doc_files="README ChangeLog*"
extra_dev_pkg_files="usr/lib/${name}-${version}"
# Override of the framework's default configure step.
# NOTE(review): $cflags and $configure_args are presumably supplied by the
# build framework that sources this spec — not defined in this file.
configure() {
# libffi configure fails if called with an absolute path for configure
./configure CFLAGS="$cflags" $configure_args
}
|
aloofschipperke/aloofix
|
specs/pkgs/libffi/spec.sh
|
Shell
|
gpl-3.0
| 645 |
#!/bin/bash
# Regression test: add an IPv6 address with fw-admin, then verify that the
# same address can be queried back. Exit 0 on success, 1 on any failure.

DATA="fe00::1"

fw-admin -a $DATA || { echo "E: Unable to add $DATA"; exit 1; }
fw-admin -i $DATA || { echo "E: $DATA not stored"; exit 1; }

exit 0
|
aborrero/fw-admin
|
tests/testfiles/007_0_add.sh
|
Shell
|
gpl-3.0
| 179 |
#!/bin/bash
#
# ExtractZip - a script to extract Zips into folder
#
# By Nicola Ferralis <[email protected]>
#
# This script is licensed through the GNU Public license v.2.0
#
version="20180110a"

# With no argument (or -h), print usage; otherwise create the destination
# folder and unzip every *.zip in the current directory into it.
if [ "$1" = "" ] || [ "$1" = "-h" ]; then
	echo
	echo " ExtractZip v.$version"
	echo " Usage:"
	echo " extractzip.sh <destination folder>"
	echo
else
	echo " Create folder: $1"
	mkdir "$1"
	pathfiles=./
	# Glob instead of parsing 'ls' so names with spaces survive.
	for i in "$pathfiles"*;
	do
		if [ "${i##*.}" = "zip" ]; then
			unzip "$i" -d "$1"
		fi
		echo
		echo " Done!"
		echo
	done
fi
|
feranick/SpectralMachine
|
Utilities/extractzip.sh
|
Shell
|
gpl-3.0
| 595 |
#!/bin/bash
# This script creates a new domain directory.
# NOTE(review): despite the line above, this script actually generates an
# SSL private key / CSR (and optionally a self-signed certificate) for an
# already-existing domain directory under data/ — the header looks copied
# from a sibling script.
#
# Author: George Borisov <[email protected]>
set -o errexit
set -o nounset
# Repository root: one level above this script's directory.
BASE_DIR=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd)
. $BASE_DIR/lib/common.sh
initEnv $BASE_DIR
# Domain may come from $1; otherwise prompt until non-empty.
DOMAIN=${1:-}
CSR_SUBJECT=${CSR_SUBJECT:-}
# NOTE(review): $DOMAIN is unquoted in the test below; a value containing
# whitespace would break the test.
while [ -z $DOMAIN ]; do
echo -n "Enter a domain: "
read DOMAIN
done
if [ ! -e "$BASE_DIR/data/$DOMAIN" ]; then
echo 'ERROR: domain is not defined'
exit 1
fi
if ! command -pv openssl >/dev/null 2>&1; then
echo "ERROR: OpenSSL not found"
exit 1
fi
cd "$BASE_DIR/data/$DOMAIN"
# Load the global config, then the per-domain config from the domain dir.
. "$BASE_DIR/config"
. config
# Generate an RSA private key into key.pem; the key length may be passed
# as $1 and defaults to 2048 bits.
function newKey {
    local bits=${1:-2048}
    openssl genpkey -out key.pem -algorithm RSA -pkeyopt rsa_keygen_bits:$bits
}
# Create a SHA-256 certificate signing request (csr.pem) from key.pem.
# If a subject string is given as $1 it is used directly; otherwise
# openssl prompts for the subject fields interactively.
function newCSR {
    local subject=${1:-}
    if [ -z "$subject" ]; then
        openssl req -new -out csr.pem -sha256 -key key.pem
    else
        openssl req -new -out csr.pem -sha256 -key key.pem -subj "$subject"
    fi
}
# Self-sign csr.pem into cert.pem, prompting for a lifetime in years
# (default 1 year; stored as days for openssl).
function makeCert {
    local reply years
    echo -n "Certificate lifetime (in years) [1]: "
    read reply
    years=${reply:-1}
    openssl req -x509 -in csr.pem -out cert.pem -days $((years*365)) -key key.pem
}
# If an ssl/ directory already exists, offer an interactive menu; otherwise
# bootstrap a fresh key + CSR in a new ssl/ directory.
if [ -e ssl ]; then
cd ssl
# '[ true ]' tests a non-empty string and is always true; the loop exits
# only via break or exit below.
while [ true ]; do
echo "1 - New private key and CSR"
echo "2 - Make a self-signed certificate (not-recommended)"
echo "0 - Do nothing"
echo
echo -n "Your choice: "
read LINE
# NOTE(review): '-eq' errors on non-numeric input; with errexit enabled
# (set at the top of this script), typing e.g. 'x' aborts the script.
if [ $LINE -eq 1 ]; then
newKey $CONFIG_SSL_KEY_LENGTH
newCSR "$CSR_SUBJECT"
break
elif [ $LINE -eq 2 ]; then
makeCert
break
elif [ $LINE -eq 0 ]; then
exit 0
fi
done
else
echo "Generating a new SSL private key and CSR."
echo "You will also need a certificate (save to cert.pem)."
mkdir -p ssl
cd ssl
newKey
newCSR "$CSR_SUBJECT"
fi
exit 0
|
borisovg/nginx-config
|
bin/keygen.sh
|
Shell
|
gpl-3.0
| 1,725 |
#!/bin/bash -v
# Run the CardRaytracer benchmark on the JVM: serial GC, server compiler,
# low JIT compile threshold; the image goes to /dev/null and 'time'
# reports the wall-clock result.
java --version
time java -XX:+UseSerialGC -server -XX:CompileThreshold=2 CardRaytracer /dev/null
|
Mark-Kovalyov/CardRaytracerBenchmark
|
java/run.sh
|
Shell
|
gpl-3.0
| 112 |
#!/bin/bash
# Tabulate scaffold lengths from several SPAdes assemblies side by side:
# column 0 from out/spades-bwa, column 1 from out/spades-bwa-jdj, and
# columns 2..END from out/spades-bwa-jdj$I.
# bash x.sh 6
if [ $# -eq 0 ]; then
  echo "bash x.sh <END>"
  exit
fi
END=$1

# emit_lengths <label> <assembly-dir> <out-file>
# Line 1 is the column label, followed by one scaffold length per line.
emit_lengths() {
  echo "$1" > "$3"
  bioawk -c fastx '{ print $name, length($seq) }' < "$2"/scaffolds.fasta | cut -f 2 >> "$3"
}

files=(out/spades-length0.txt out/spades-length1.txt)
emit_lengths 0 out/spades-bwa out/spades-length0.txt
emit_lengths 1 out/spades-bwa-jdj out/spades-length1.txt
for ((I=2;I<=END;I++)); do
  files+=("out/spades-length$I.txt")
  emit_lengths "$I" "out/spades-bwa-jdj$I" "out/spades-length$I.txt"
done

# Explicit file list instead of the original 'out/spades-length?.txt' glob,
# whose single-character '?' only matched columns 0-9 and silently dropped
# (and failed to clean up) column 10 and above.
paste "${files[@]}" > out/spades-length.txt
rm "${files[@]}"
echo See out/spades-length.txt
|
goshng/swu-seq
|
src/sh/r-spades-length.sh
|
Shell
|
gpl-3.0
| 704 |
#---------------------------------------------------------------------
# Function: InstallWebServer Ubuntu 16.04
# Install and configure Apache2, php + modules
#---------------------------------------------------------------------
# Install and configure the chosen web server stack on Ubuntu 16.04:
# Apache + mod_php/FPM or nginx + PHP-FPM, plus PHP modules, optional
# phpMyAdmin, helper tools and Let's Encrypt. Reads CFG_* globals; sets
# CFG_NGINX/CFG_APACHE. apt_install/hide_output are helpers defined
# elsewhere in this installer.
InstallWebServer() {
if [ "$CFG_WEBSERVER" == "apache" ]; then
CFG_NGINX=n
CFG_APACHE=y
echo -n "Installing Web server (Apache) and modules... "
# Pre-seed debconf so the phpmyadmin package configures for Apache
# non-interactively; the dbconfig step itself must stay manual (see below).
echo "phpmyadmin phpmyadmin/reconfigure-webserver multiselect apache2" | debconf-set-selections
# - DISABLED DUE TO A BUG IN DBCONFIG - echo "phpmyadmin phpmyadmin/dbconfig-install boolean false" | debconf-set-selections
echo "dbconfig-common dbconfig-common/dbconfig-install boolean false" | debconf-set-selections
# apt_install apache2 apache2-doc apache2-utils libapache2-mod-php libapache2-mod-fastcgi libapache2-mod-fcgid apache2-suexec-pristine libapache2-mod-python php7.0-fpm libruby
apt_install apache2 apache2-doc apache2-utils libapache2-mod-php libapache2-mod-fcgid apache2-suexec-pristine libruby libapache2-mod-python
echo -e "[${green}DONE${NC}]\n"
echo -n "Installing PHP and modules... "
# apt_install php7.0 php7.0-common php7.0-gd php7.0-dev php7.0-mysqlnd php7.0-imap php7.0-cli php7.0-cgi php-pear php-auth php7.0-mcrypt php7.0-curl php7.0-intl php7.0-pspell php7.0-recode php7.0-sqlite3 php7.0-tidy php7.0-xmlrpc php7.0-xsl php-memcached php-imagick php-gettext php7.0-zip php7.0-mbstring php7.0-opcache php-apcu php7.0-bz2 php-redis
apt_install php7.0 php7.0-common php7.0-gd php7.0-mysql php7.0-imap php7.0-cli php7.0-cgi php-pear php-auth php7.0-mcrypt php7.0-curl php7.0-intl php7.0-pspell php7.0-recode php7.0-sqlite3 php7.0-tidy php7.0-xmlrpc php7.0-xsl php-memcache php-imagick php-gettext php7.0-zip php7.0-mbstring
echo -e "[${green}DONE${NC}]\n"
echo -n "Installing APCu... "
apt_install php7.0-opcache php-apcu
echo -e "[${green}DONE${NC}]\n"
echo -n "Installing PHP-FPM... "
apt_install libapache2-mod-fastcgi php7.0-fpm
echo -e "[${green}DONE${NC}]\n"
# Mitigate the httpoxy vulnerability: strip the Proxy request header.
echo -n "Disabling HTTP_PROXY... "
echo "<IfModule mod_headers.c>
RequestHeader unset Proxy early
</IfModule>" | tee /etc/apache2/conf-available/httpoxy.conf > /dev/null 2>&1
a2enconf httpoxy > /dev/null 2>&1
echo -e "[${green}DONE${NC}]\n"
if [ "$CFG_PHPMYADMIN" == "yes" ]; then
echo "==========================================================================================="
echo "Attention: When asked 'Configure database for phpmyadmin with dbconfig-common?' select 'NO'"
echo "Due to a bug in dbconfig-common, this can't be automated."
echo "==========================================================================================="
echo "Press ENTER to continue... "
read DUMMY
echo -n "Installing phpMyAdmin... "
apt-get -y install phpmyadmin
echo -e "[${green}DONE${NC}]\n"
fi
echo -n "Activating Apache modules... "
a2enmod suexec > /dev/null 2>&1
a2enmod rewrite > /dev/null 2>&1
a2enmod ssl > /dev/null 2>&1
a2enmod actions > /dev/null 2>&1
a2enmod include > /dev/null 2>&1
a2enmod cgi > /dev/null 2>&1
a2enmod dav_fs > /dev/null 2>&1
a2enmod dav > /dev/null 2>&1
a2enmod auth_digest > /dev/null 2>&1
a2enmod headers > /dev/null 2>&1
a2enmod fastcgi > /dev/null 2>&1
a2enmod alias > /dev/null 2>&1
# a2enmod fcgid > /dev/null 2>&1
echo -e "[${green}DONE${NC}]\n"
echo -n "Restarting Apache... "
service apache2 restart
echo -e "[${green}DONE${NC}]\n"
echo -n "Installing needed programs for PHP and Apache (mcrypt, etc.)... "
apt_install mcrypt imagemagick memcached curl tidy snmp redis-server
echo -e "[${green}DONE${NC}]\n"
elif [ "$CFG_WEBSERVER" == "nginx" ]; then
CFG_NGINX=y
CFG_APACHE=n
echo -n "Installing Web server (nginx) and modules... "
# Apache may be preinstalled on the image; stop it and drop its rc links.
service apache2 stop
hide_output update-rc.d -f apache2 remove
apt_install nginx
service nginx start
echo -e "[${green}DONE${NC}]\n"
# apt_install php7.0-opcache php7.0-fpm php7.0 php7.0-common php7.0-gd php7.0-mysql php7.0-imap php7.0-cli php7.0-cgi php-pear php-auth php7.0-mcrypt mcrypt imagemagick libruby php7.0-curl php7.0-intl php7.0-pspell php7.0-recode php7.0-sqlite3 php7.0-tidy php7.0-xmlrpc php7.0-xsl memcached php-memcache php-imagick php-gettext php7.0-zip php7.0-mbstring php7.0-opcache php-apcu
echo -n "Installing PHP-FPM... "
apt_install php7.0-fpm
echo -e "[${green}DONE${NC}]\n"
echo -n "Installing PHP and modules... "
apt_install php7.0-opcache php7.0 php7.0-common php7.0-gd php7.0-mysql php7.0-imap php7.0-cli php7.0-cgi php-pear php-auth php7.0-mcrypt libruby php7.0-curl php7.0-intl php7.0-pspell php7.0-recode php7.0-sqlite3 php7.0-tidy php7.0-xmlrpc php7.0-xsl php-memcache php-imagick php-gettext php7.0-zip php7.0-mbstring
echo -e "[${green}DONE${NC}]\n"
echo -n "Installing APCu... "
apt_install php-apcu
# Harden PHP-FPM path handling and set the timezone in php.ini.
sed -i "s/;cgi.fix_pathinfo=1/cgi.fix_pathinfo=0/" /etc/php/7.0/fpm/php.ini
# NOTE(review): 'sed -n ...p' prints nothing when TIME_ZONE has no
# ' (...)' suffix, leaving TIME_ZONE empty — verify the upstream format.
TIME_ZONE=$(echo "$TIME_ZONE" | sed -n 's/ (.*)$//p')
sed -i "s/;date.timezone =/date.timezone=\"${TIME_ZONE//\//\\/}\"/" /etc/php/7.0/fpm/php.ini
echo -e "[${green}DONE${NC}]\n"
echo -n "Reloading PHP-FPM... "
service php7.0-fpm reload
echo -e "[${green}DONE${NC}]\n"
echo -n "Installing fcgiwrap... "
apt_install fcgiwrap
echo -e "[${green}DONE${NC}]\n"
echo "phpmyadmin phpmyadmin/reconfigure-webserver multiselect none" | debconf-set-selections
if [ "$CFG_PHPMYADMIN" == "yes" ]; then
echo "==========================================================================================="
echo "Attention: When asked 'Configure database for phpmyadmin with dbconfig-common?' select 'NO'"
echo "Due to a bug in dbconfig-common, this can't be automated."
echo "==========================================================================================="
echo "Press ENTER to continue... "
read DUMMY
echo -n "Installing phpMyAdmin... "
apt-get -y install phpmyadmin
echo "With nginx phpMyAdmin is accessibile at http://$CFG_HOSTNAME_FQDN:8081/phpmyadmin or http://${IP_ADDRESS[0]}:8081/phpmyadmin"
echo -e "[${green}DONE${NC}]\n"
fi
echo -n "Installing needed programs for PHP and nginx (mcrypt, etc.)... "
apt_install mcrypt imagemagick memcached curl tidy snmp redis-server
echo -e "[${green}DONE${NC}]\n"
phpenmod mcrypt
phpenmod mbstring
fi
# if [ "$CFG_XCACHE" == "yes" ]; then
# echo -n "Installing XCache... "
# apt_install php7-xcache
# echo -e "[${green}DONE${NC}]\n"
# fi
echo -n "Installing Let's Encrypt (letsencrypt)... "
apt_install letsencrypt
echo -e "[${green}DONE${NC}]\n"
}
|
servisys/ispconfig_setup
|
distros/ubuntu-16.04/install_webserver.sh
|
Shell
|
gpl-3.0
| 6,510 |
#!/bin/bash
#
# Configure a network interface for DHCP on a Debian-style system: reset
# /etc/network/interfaces to loopback + interfaces.d include, set the
# hostname, let the user pick an interface, write its DHCP stanza and
# bring it up. User-facing messages are intentionally in Portuguese.
#
clear
echo -e 'source /etc/network/interfaces.d/*\n\nauto lo\niface lo inet loopback' > /etc/network/interfaces
echo 'Configurador de rede por DHCP 1.0'
echo 'Script por Eduardo Medeiros & Fabricio Prado'
echo ''
echo 'Definindo nome do computador...'
echo 'FABRICIO_PRADO' > /etc/hostname
hostname FABRICIO_PRADO
echo ''
echo 'Listando as placas de rede ativas...'
# Show interface names only (second field, trailing ':' stripped).
ip link | grep BROADCAST | awk '{print $2}' | tr -d ':'
echo ''
echo -n 'Escreva o nome da interface: '
# -r keeps backslashes literal; quote the name wherever it forms a path.
read -r intrede
echo -e "auto $intrede\niface $intrede inet dhcp" > "/etc/network/interfaces.d/$intrede"
echo ''
echo 'Listando arquivo de configuração da interface...'
echo ''
cat "/etc/network/interfaces.d/$intrede"
ifdown "$intrede"
ifup "$intrede"
ip addr
|
FabricioPrado/trabalho_tumelero_2109
|
configura_rede.sh
|
Shell
|
gpl-3.0
| 738 |
#!/bin/bash
# Clone an existing Apache virtual host under a new server name: copy the
# vhost file, rewrite ServerName and the log paths in the copy, enable the
# new site, register it in /etc/hosts and reload Apache.
ROOT_UID=0
NOTROOT=87
# Check if user is root
if [ $UID -ne $ROOT_UID ]
then echo “You must be root to run this script.”
	exit $NOTROOT
fi
display_help() {
	echo "Usage: $0 [option= ...] " >&2
	echo
	echo " -h, --help Show this help"
	echo " -v, --vhost Apache virtual host name"
	echo " -n, --newvhost New Apache virtual host name"
	echo
	exit 1
}
VHOST_AVAILABLE=/etc/apache2/sites-available/
for i in "$@"
do
	case $i in
		-n=*|--newvhost=*)
			NEWVHOST="${i#*=}"
			;;
		-v=*|--vhost=*)
			VHOST="${i#*=}"
			;;
		-h|--help)
			display_help
			;;
		*)
			# unknown option
			display_help
			;;
	esac
done
# Find the file defining the source vhost (ignoring commented lines).
vdomain_file_name=$(grep "ServerName $VHOST" "$VHOST_AVAILABLE"* | grep -v -e "#")
[ -z "$vdomain_file_name" ] && echo "Source virtual host is not found!" && exit 1
vdomain_file_name=${vdomain_file_name%:*}
# copy as new virtual name file
cp "$vdomain_file_name" "$VHOST_AVAILABLE$NEWVHOST.conf"
# Double quotes so $VHOST/$NEWVHOST actually expand — the original used
# single quotes, which made this ServerName rewrite a literal no-op.
sed -i "s/ServerName $VHOST/ServerName $NEWVHOST/g" "$VHOST_AVAILABLE$NEWVHOST.conf"
# Change error.log path — in the NEW vhost file (the original mistakenly
# edited the source vhost's file instead of the clone).
sed -i "s/$VHOST-error.log/$NEWVHOST-error.log/g" "$VHOST_AVAILABLE$NEWVHOST.conf"
# Change access.log path
sed -i "s/$VHOST-access.log/$NEWVHOST-access.log/g" "$VHOST_AVAILABLE$NEWVHOST.conf"
# 4. Enable new host
a2ensite "$NEWVHOST"
# 5. Update host file
echo -e "127.0.1.1\t$NEWVHOST" >> /etc/hosts
# 6. Restart apache
/etc/init.d/apache2 reload
echo "Host $VHOST cloned to $NEWVHOST."
exit;
|
bgerp/bgerp-install
|
a2clonevhost.sh
|
Shell
|
gpl-3.0
| 1,567 |
#!/bin/bash
# Test Erlang.g4 grammar against Erlang/OTP's *.erl sources
# Note: a preprocessing pass has to be done beforehand (creates *.P files)
#
# Flow: fetch the OTP 'maint' sources if absent, preprocess each .erl with
# 'erlc -P', then run the ANTLR TestRig on the preprocessed file and count
# PASSED / ERROR / SKIPPED (SKIPPED = preprocessing failed, typically
# because of includes that only exist after a full OTP build).

# Download and unpack the OTP maint branch into $SRC, via wget or curl.
get() {
URL="$1" ; SRC="$2"
which wget 1>/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
wget $URL -O maint
else
curl -O $URL
fi
unzip maint
rm -f maint
mv otp-maint/ $SRC/
}
SRC="$1"
PPED="$2"
PPED=${PPED:="${SRC}/preprocessed"}
[[ ! -d $SRC/ ]] && get 'https://codeload.github.com/erlang/otp/zip/maint' $SRC
O=$SRC/output.txt
tot=$(echo $(find $SRC -name '*.erl' | wc -l))
echo -n "${tot} files to test. (stop with both ^C and ^G). SKIPPED <> preprocessing failed. "
[[ $tot -gt 100 ]] && printf "\e[1;4m%s\e[0m" 'This will take a while!'
[[ $tot -eq 0 ]] && printf "\e[1;4m%s\n\e[0m" "No $SRC/'*.erl' files to check on!" && exit 1
echo
# j counts down the remaining files; oks counts passes (skips also count
# toward oks so they do not appear as errors in the summary).
j=$tot ; oks=0 ; skd=0
for file in `find $SRC -name '*.erl'`; do
## Remove random appearances of ‘’.
#sed -i 's///g' $file
COLUMNS=$(tput cols)
dir=`dirname $file`
ppdir=$PPED
mkdir -p $ppdir
pped=$ppdir/`basename ${file%.erl}.P`
## Preprocessor
erlc -I $dir/../include -I ${file%.erl}_data/ -o $ppdir -P $file 1>/dev/null 2>/dev/null
if [ $? -eq 0 ]; then
## Syntax
# TestRig output (stdout+stderr) lands in $O; empty $O means a clean parse.
java org.antlr.v4.gui.TestRig Erlang forms -encoding utf8 $pped 1>$O 2>>$O
if [ -s $O ]; then # $O is not empty
printf "%s \033[0;33m%s\033[00m\033[0;31m%$(($COLUMNS - ${#file} -${#j} -1))s\033[00m\n" $j $pped ERROR
cat $O
else # $O is empty
printf "%s \033[0;33m%s\033[00m\033[0;32m%$(($COLUMNS - ${#file} -${#j} -1))s\033[00m\n" $j $pped PASSED
((oks++))
fi
else # Preprocessing failed
# Reasons: some of the files to include are not built yet (C code…) ; -I option is wrong (unlikely).
printf "%s \033[0;33m%s\033[00m\033[0;34m%$(($COLUMNS - ${#file} -${#j} -1))s\033[00m\n" $j $file SKIPPED
((skd++))
((oks++))
fi
((j--))
done
rm -f $O
echo
# Summary: error/checked/skipped counts left-aligned, pass rate right-aligned.
lhs="$(($tot - $oks)) errors, checked: $tot, skipped: $skd"
printf "\e[1;4m%s\e[0m\033[1;1m%$(($COLUMNS - ${#lhs}))s\033[00m\n" "$lhs" "$((100 * $oks / $tot))%"
|
iazarny/gitember
|
src/main/java/com/az/gitember/controller/lang/erlang/check.sh
|
Shell
|
gpl-3.0
| 2,267 |
#!/bin/bash
# Re-normalize each map file against its committed (HEAD) version using
# MapPatcher's -clean mode, removing spurious diffs introduced by editors.
#MAPROOT='../../maps/'
MAPROOT="../maps/"
MAPFILES=(
"$MAPROOT"fernflower.dmm
"$MAPROOT"efficiency.dmm
)
for MAPFILE in "${MAPFILES[@]}"
do
	echo "Processing $MAPFILE..."
	# Extract the committed version as the patch baseline.
	git show "HEAD:$MAPFILE" > tmp.dmm
	java -jar MapPatcher.jar -clean tmp.dmm "$MAPFILE" "$MAPFILE"
	#dos2unix -U '../../maps/'$MAPFILE
	rm tmp.dmm
	echo "----------------------"
done
|
BlueCabaret/Wonderwork
|
maptools/clean_map_git.sh
|
Shell
|
gpl-3.0
| 374 |
#!/bin/sh
# Drop all Npgsql test databases (names matching ${NPGSQL_DB_PREFIX}).
# Expects PSQL, NPGSQL_UID, NPGSQL_HOST, NPGSQL_template_DB and
# NPGSQL_DB_PREFIX to be set in the environment.
echo "Deleting all npgsql_tests_* databases..."
drop_ok=1
# List matching databases. Uses ${NPGSQL_HOST} consistently for both the
# listing and the drop (the original hard-coded 127.0.0.1 for the listing).
for dbname in `${PSQL} -U ${NPGSQL_UID} -h ${NPGSQL_HOST} -d ${NPGSQL_template_DB} -c "select datname from pg_database;" |grep ${NPGSQL_DB_PREFIX}`;
do
	# Braces (not parentheses) around the failure branch so 'exit 1'
	# terminates the whole script instead of only a subshell.
	echo -n "Deleting database ${dbname}..." && ${PSQL} -U ${NPGSQL_UID} -h ${NPGSQL_HOST} -d ${NPGSQL_template_DB} -c "drop database ${dbname} ;" > /dev/null 2>&1 && echo "OK" || { echo "FAILED" && exit 1; };
done
|
anonymouskeyboard47/Tripodmaps
|
Npgsql2.0.11.src/Npgsql2.0.11.src/testsuite/noninteractive/cleanDBs.sh
|
Shell
|
gpl-3.0
| 437 |
#!/bin/sh
# Launch the ProcessManagerGroup EPICS server instance.
export AS_NAME="ProcessManagerGroup"
JAVA_OPTS=""
# Init ANKA Servers default paramters
# (_init.sh is sourced from this script's directory; presumably it sets
# AS_JAVA and AS_CLASSPATH — TODO confirm against _init.sh.)
. "$(dirname "$0")/_init.sh"
JAVA_OPTS="$JAVA_OPTS -Dlog4j.debug=false"
# Instance-specific server configuration properties file.
JAVA_OPTS="$JAVA_OPTS -DAppServer.init=AppServer-procmangr.properties"
# JAVA_OPTS is intentionally unquoted so each option word-splits into a
# separate JVM argument.
$AS_JAVA -classpath $AS_CLASSPATH $JAVA_OPTS com.kriznar.csshell.epics.server.Server
|
ANKA-CS/ANKA-Servers
|
sh/ProcessManagerGroup.sh
|
Shell
|
gpl-3.0
| 333 |
#!/usr/bin/with-contenv bash
# Start the RPC service; with-contenv makes the container environment
# variables available to this script.
set -e
cd /rpc
# exec replaces the shell so the Python process receives signals directly.
exec python start.rpc.py
|
LTD-Beget/sprutio-rpc
|
run-rpc.sh
|
Shell
|
gpl-3.0
| 69 |
#!/bin/bash
# Run configure with the libffi flags supplied explicitly, bypassing
# pkg-config discovery of libffi.
LIBFFI_CFLAGS="-I/usr/include" LIBFFI_LIBS="-lffi" ./configure
|
chriskmanx/qmole
|
QMOLEDEV/glib-2.29.14/configure.withffi.sh
|
Shell
|
gpl-3.0
| 75 |
#!/bin/bash
# Run simulation analysis no. 83 for the paralog pair YNL069C/YIL133C
# (MG94 non-clock model) and capture the console output to a log file.
python AnalyzeSimulation.py --paralog1 YNL069C --paralog2 YIL133C --simnum 83 > YNL069C_YIL133C_MG94_nonclock_Sim83_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Simulation/ShFiles/MG94_YNL069C_YIL133C_sim83.sh
|
Shell
|
gpl-3.0
| 145 |
#!/bin/bash
# Bash script to run the Mocha / Chai tests
# (C) 2016, 2017 Dirk Stolle

# get directory of this script
THIS_DIR="${BASH_SOURCE%/*}"
# get current directory
OLD_WORKING_DIRECTORY="$(pwd)"
# change to this directory, because that is where meteor is
cd "$THIS_DIR"
if [[ $? -ne 0 ]]
then
  echo "Could not change to meteor application directory!"
  exit 1
fi
# get meteor executable ('command -v' is the portable, recommended
# replacement for the external 'which' utility)
METEOR=$(command -v meteor)
if [[ -z $METEOR ]]
then
  METEOR=/usr/bin/meteor
fi
if [[ ! -x $METEOR ]]
then
  # Fall back to the per-user installation location.
  METEOR=~/.meteor/meteor
  echo "Info: Global meteor executable was not found!"
  echo "Info: Using $METEOR instead."
  if [[ ! -x $METEOR ]]
  then
    echo "Error: $METEOR is not an executable file! Test run will be cancelled."
    echo "Please check that you have installed Meteor properly."
    echo -n "If you are sure that Meteor is installed, then make sure that the"
    echo -n " meteor executable is in your PATH environment variable or that"
    echo " $METEOR links to that executable."
    exit 1
  fi
fi
# Tests will be run on port 3333 instead of 3000 to make sure that any
# potential users on port 3000 will not see the tests.
"$METEOR" test --driver-package meteortesting:mocha --once --full-app \
  --port 3333
EXITCODE_METEOR_TEST=$?
# change back to previous directory
cd "$OLD_WORKING_DIRECTORY"
if [[ $? -ne 0 ]]
then
  echo "Could not change back to old working directory!"
  exit 1
fi
# Return a proper exit code from the meteor test command.
exit $EXITCODE_METEOR_TEST
|
striezel/meteor-chess
|
meteorapp/tests.sh
|
Shell
|
gpl-3.0
| 1,498 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.